--- /dev/null
+libgdruntime.so.3 libgphobos3 #MINVER#
+ CPU_ALLOC@Base 12
+ CPU_ALLOC_SIZE@Base 12
+ CPU_COUNT@Base 12
+ CPU_COUNT_S@Base 12
+ CPU_FREE@Base 12
+ CPU_ISSET@Base 12
+ CPU_SET@Base 12
+ LOG_MASK@Base 12
+ LOG_UPTO@Base 12
+ SIGRTMAX@Base 12
+ SIGRTMIN@Base 12
+ S_TYPEISMQ@Base 12
+ S_TYPEISSEM@Base 12
+ S_TYPEISSHM@Base 12
+ _D101TypeInfo_E4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7AddType6__initZ@Base 12
+ _D101TypeInfo_S4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D102TypeInfo_xE4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7AddType6__initZ@Base 12
+ _D103TypeInfo_S4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D105TypeInfo_E4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10IsDelegate6__initZ@Base 12
+ _D10TypeInfo_a6__initZ@Base 12
+ _D10TypeInfo_a6__vtblZ@Base 12
+ _D10TypeInfo_a7__ClassZ@Base 12
+ _D10TypeInfo_b6__initZ@Base 12
+ _D10TypeInfo_b6__vtblZ@Base 12
+ _D10TypeInfo_b7__ClassZ@Base 12
+ _D10TypeInfo_c6__initZ@Base 12
+ _D10TypeInfo_c6__vtblZ@Base 12
+ _D10TypeInfo_c7__ClassZ@Base 12
+ _D10TypeInfo_d6__initZ@Base 12
+ _D10TypeInfo_d6__vtblZ@Base 12
+ _D10TypeInfo_d7__ClassZ@Base 12
+ _D10TypeInfo_e6__initZ@Base 12
+ _D10TypeInfo_e6__vtblZ@Base 12
+ _D10TypeInfo_e7__ClassZ@Base 12
+ _D10TypeInfo_f6__initZ@Base 12
+ _D10TypeInfo_f6__vtblZ@Base 12
+ _D10TypeInfo_f7__ClassZ@Base 12
+ _D10TypeInfo_g6__initZ@Base 12
+ _D10TypeInfo_g6__vtblZ@Base 12
+ _D10TypeInfo_g7__ClassZ@Base 12
+ _D10TypeInfo_h6__initZ@Base 12
+ _D10TypeInfo_h6__vtblZ@Base 12
+ _D10TypeInfo_h7__ClassZ@Base 12
+ _D10TypeInfo_i6__initZ@Base 12
+ _D10TypeInfo_i6__vtblZ@Base 12
+ _D10TypeInfo_i7__ClassZ@Base 12
+ _D10TypeInfo_j6__initZ@Base 12
+ _D10TypeInfo_j6__vtblZ@Base 12
+ _D10TypeInfo_j7__ClassZ@Base 12
+ _D10TypeInfo_k6__initZ@Base 12
+ _D10TypeInfo_k6__vtblZ@Base 12
+ _D10TypeInfo_k7__ClassZ@Base 12
+ _D10TypeInfo_l6__initZ@Base 12
+ _D10TypeInfo_l6__vtblZ@Base 12
+ _D10TypeInfo_l7__ClassZ@Base 12
+ _D10TypeInfo_m6__initZ@Base 12
+ _D10TypeInfo_m6__vtblZ@Base 12
+ _D10TypeInfo_m7__ClassZ@Base 12
+ _D10TypeInfo_n6__initZ@Base 12
+ _D10TypeInfo_n6__vtblZ@Base 12
+ _D10TypeInfo_n7__ClassZ@Base 12
+ _D10TypeInfo_o6__initZ@Base 12
+ _D10TypeInfo_o6__vtblZ@Base 12
+ _D10TypeInfo_o7__ClassZ@Base 12
+ _D10TypeInfo_p6__initZ@Base 12
+ _D10TypeInfo_p6__vtblZ@Base 12
+ _D10TypeInfo_p7__ClassZ@Base 12
+ _D10TypeInfo_q6__initZ@Base 12
+ _D10TypeInfo_q6__vtblZ@Base 12
+ _D10TypeInfo_q7__ClassZ@Base 12
+ _D10TypeInfo_r6__initZ@Base 12
+ _D10TypeInfo_r6__vtblZ@Base 12
+ _D10TypeInfo_r7__ClassZ@Base 12
+ _D10TypeInfo_s6__initZ@Base 12
+ _D10TypeInfo_s6__vtblZ@Base 12
+ _D10TypeInfo_s7__ClassZ@Base 12
+ _D10TypeInfo_t6__initZ@Base 12
+ _D10TypeInfo_t6__vtblZ@Base 12
+ _D10TypeInfo_t7__ClassZ@Base 12
+ _D10TypeInfo_u6__initZ@Base 12
+ _D10TypeInfo_u6__vtblZ@Base 12
+ _D10TypeInfo_u7__ClassZ@Base 12
+ _D10TypeInfo_v6__initZ@Base 12
+ _D10TypeInfo_v6__vtblZ@Base 12
+ _D10TypeInfo_v7__ClassZ@Base 12
+ _D10TypeInfo_w6__initZ@Base 12
+ _D10TypeInfo_w6__vtblZ@Base 12
+ _D10TypeInfo_w7__ClassZ@Base 12
+ _D113TypeInfo_S4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D115TypeInfo_S4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D115TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D115TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D116TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D116TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D118TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__initZ@Base 12
+ _D118TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__initZ@Base 12
+ _D11TypeInfo_Aa6__initZ@Base 12
+ _D11TypeInfo_Aa6__vtblZ@Base 12
+ _D11TypeInfo_Aa7__ClassZ@Base 12
+ _D11TypeInfo_Ab6__initZ@Base 12
+ _D11TypeInfo_Ab6__vtblZ@Base 12
+ _D11TypeInfo_Ab7__ClassZ@Base 12
+ _D11TypeInfo_Ac6__initZ@Base 12
+ _D11TypeInfo_Ac6__vtblZ@Base 12
+ _D11TypeInfo_Ac7__ClassZ@Base 12
+ _D11TypeInfo_Ad6__initZ@Base 12
+ _D11TypeInfo_Ad6__vtblZ@Base 12
+ _D11TypeInfo_Ad7__ClassZ@Base 12
+ _D11TypeInfo_Ae6__initZ@Base 12
+ _D11TypeInfo_Ae6__vtblZ@Base 12
+ _D11TypeInfo_Ae7__ClassZ@Base 12
+ _D11TypeInfo_Af6__initZ@Base 12
+ _D11TypeInfo_Af6__vtblZ@Base 12
+ _D11TypeInfo_Af7__ClassZ@Base 12
+ _D11TypeInfo_Ag6__initZ@Base 12
+ _D11TypeInfo_Ag6__vtblZ@Base 12
+ _D11TypeInfo_Ag7__ClassZ@Base 12
+ _D11TypeInfo_Ah6__initZ@Base 12
+ _D11TypeInfo_Ah6__vtblZ@Base 12
+ _D11TypeInfo_Ah7__ClassZ@Base 12
+ _D11TypeInfo_Ai6__initZ@Base 12
+ _D11TypeInfo_Ai6__vtblZ@Base 12
+ _D11TypeInfo_Ai7__ClassZ@Base 12
+ _D11TypeInfo_Aj6__initZ@Base 12
+ _D11TypeInfo_Aj6__vtblZ@Base 12
+ _D11TypeInfo_Aj7__ClassZ@Base 12
+ _D11TypeInfo_Ak6__initZ@Base 12
+ _D11TypeInfo_Ak6__vtblZ@Base 12
+ _D11TypeInfo_Ak7__ClassZ@Base 12
+ _D11TypeInfo_Al6__initZ@Base 12
+ _D11TypeInfo_Al6__vtblZ@Base 12
+ _D11TypeInfo_Al7__ClassZ@Base 12
+ _D11TypeInfo_Am6__initZ@Base 12
+ _D11TypeInfo_Am6__vtblZ@Base 12
+ _D11TypeInfo_Am7__ClassZ@Base 12
+ _D11TypeInfo_Ao6__initZ@Base 12
+ _D11TypeInfo_Ao6__vtblZ@Base 12
+ _D11TypeInfo_Ao7__ClassZ@Base 12
+ _D11TypeInfo_Ap6__initZ@Base 12
+ _D11TypeInfo_Ap6__vtblZ@Base 12
+ _D11TypeInfo_Ap7__ClassZ@Base 12
+ _D11TypeInfo_Aq6__initZ@Base 12
+ _D11TypeInfo_Aq6__vtblZ@Base 12
+ _D11TypeInfo_Aq7__ClassZ@Base 12
+ _D11TypeInfo_Ar6__initZ@Base 12
+ _D11TypeInfo_Ar6__vtblZ@Base 12
+ _D11TypeInfo_Ar7__ClassZ@Base 12
+ _D11TypeInfo_As6__initZ@Base 12
+ _D11TypeInfo_As6__vtblZ@Base 12
+ _D11TypeInfo_As7__ClassZ@Base 12
+ _D11TypeInfo_At6__initZ@Base 12
+ _D11TypeInfo_At6__vtblZ@Base 12
+ _D11TypeInfo_At7__ClassZ@Base 12
+ _D11TypeInfo_Au6__initZ@Base 12
+ _D11TypeInfo_Au6__vtblZ@Base 12
+ _D11TypeInfo_Au7__ClassZ@Base 12
+ _D11TypeInfo_Av6__initZ@Base 12
+ _D11TypeInfo_Av6__vtblZ@Base 12
+ _D11TypeInfo_Av7__ClassZ@Base 12
+ _D11TypeInfo_Aw6__initZ@Base 12
+ _D11TypeInfo_Aw6__vtblZ@Base 12
+ _D11TypeInfo_Aw7__ClassZ@Base 12
+ _D11TypeInfo_Oa6__initZ@Base 12
+ _D11TypeInfo_Ou6__initZ@Base 12
+ _D11TypeInfo_xa6__initZ@Base 12
+ _D11TypeInfo_xb6__initZ@Base 12
+ _D11TypeInfo_xd6__initZ@Base 12
+ _D11TypeInfo_xe6__initZ@Base 12
+ _D11TypeInfo_xf6__initZ@Base 12
+ _D11TypeInfo_xh6__initZ@Base 12
+ _D11TypeInfo_xi6__initZ@Base 12
+ _D11TypeInfo_xk6__initZ@Base 12
+ _D11TypeInfo_xm6__initZ@Base 12
+ _D11TypeInfo_xv6__initZ@Base 12
+ _D11TypeInfo_ya6__initZ@Base 12
+ _D11TypeInfo_yk6__initZ@Base 12
+ _D120TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__initZ@Base 12
+ _D12TypeInfo_AOa6__initZ@Base 12
+ _D12TypeInfo_AOu6__initZ@Base 12
+ _D12TypeInfo_Axa6__initZ@Base 12
+ _D12TypeInfo_Axa6__vtblZ@Base 12
+ _D12TypeInfo_Axa7__ClassZ@Base 12
+ _D12TypeInfo_Axi6__initZ@Base 12
+ _D12TypeInfo_Axv6__initZ@Base 12
+ _D12TypeInfo_Aya6__initZ@Base 12
+ _D12TypeInfo_Aya6__vtblZ@Base 12
+ _D12TypeInfo_Aya7__ClassZ@Base 12
+ _D12TypeInfo_G8h6__initZ@Base 12
+ _D12TypeInfo_Oxk6__initZ@Base 12
+ _D12TypeInfo_Pxh6__initZ@Base 12
+ _D12TypeInfo_Pxv6__initZ@Base 12
+ _D12TypeInfo_xAa6__initZ@Base 12
+ _D12TypeInfo_xAi6__initZ@Base 12
+ _D12TypeInfo_xAv6__initZ@Base 12
+ _D12TypeInfo_xPh6__initZ@Base 12
+ _D12TypeInfo_xPv6__initZ@Base 12
+ _D12TypeInfo_yAa6__initZ@Base 12
+ _D13TypeInfo_AxPv6__initZ@Base 12
+ _D13TypeInfo_AyAa6__initZ@Base 12
+ _D13TypeInfo_Enum6__initZ@Base 12
+ _D13TypeInfo_Enum6__vtblZ@Base 12
+ _D13TypeInfo_Enum7__ClassZ@Base 12
+ _D13TypeInfo_G12a6__initZ@Base 12
+ _D13TypeInfo_G48a6__initZ@Base 12
+ _D13TypeInfo_xAPv6__initZ@Base 12
+ _D13TypeInfo_xAya6__initZ@Base 12
+ _D13TypeInfo_xG8h6__initZ@Base 12
+ _D14TypeInfo_Array6__initZ@Base 12
+ _D14TypeInfo_Array6__vtblZ@Base 12
+ _D14TypeInfo_Array7__ClassZ@Base 12
+ _D14TypeInfo_AxAya6__initZ@Base 12
+ _D14TypeInfo_Class6__initZ@Base 12
+ _D14TypeInfo_Class6__vtblZ@Base 12
+ _D14TypeInfo_Class7__ClassZ@Base 12
+ _D14TypeInfo_Const6__initZ@Base 12
+ _D14TypeInfo_Const6__vtblZ@Base 12
+ _D14TypeInfo_Const7__ClassZ@Base 12
+ _D14TypeInfo_HAxam6__initZ@Base 12
+ _D14TypeInfo_Inout6__initZ@Base 12
+ _D14TypeInfo_Inout6__vtblZ@Base 12
+ _D14TypeInfo_Inout7__ClassZ@Base 12
+ _D14TypeInfo_Tuple6__initZ@Base 12
+ _D14TypeInfo_Tuple6__vtblZ@Base 12
+ _D14TypeInfo_Tuple7__ClassZ@Base 12
+ _D14TypeInfo_xAAya6__initZ@Base 12
+ _D14TypeInfo_xG12a6__initZ@Base 12
+ _D14TypeInfo_xG48a6__initZ@Base 12
+ _D15TypeInfo_HAxaxm6__initZ@Base 12
+ _D15TypeInfo_Shared6__initZ@Base 12
+ _D15TypeInfo_Shared6__vtblZ@Base 12
+ _D15TypeInfo_Shared7__ClassZ@Base 12
+ _D15TypeInfo_Struct6__initZ@Base 12
+ _D15TypeInfo_Struct6__vtblZ@Base 12
+ _D15TypeInfo_Struct7__ClassZ@Base 12
+ _D15TypeInfo_Vector6__initZ@Base 12
+ _D15TypeInfo_Vector6__vtblZ@Base 12
+ _D15TypeInfo_Vector7__ClassZ@Base 12
+ _D15TypeInfo_xHAxam6__initZ@Base 12
+ _D16TypeInfo_HPxvAya6__initZ@Base 12
+ _D16TypeInfo_Pointer6__initZ@Base 12
+ _D16TypeInfo_Pointer6__vtblZ@Base 12
+ _D16TypeInfo_Pointer7__ClassZ@Base 12
+ _D17TypeInfo_Delegate6__initZ@Base 12
+ _D17TypeInfo_Delegate6__vtblZ@Base 12
+ _D17TypeInfo_Delegate7__ClassZ@Base 12
+ _D17TypeInfo_Function6__initZ@Base 12
+ _D17TypeInfo_Function6__vtblZ@Base 12
+ _D17TypeInfo_Function7__ClassZ@Base 12
+ _D18TypeInfo_Interface6__initZ@Base 12
+ _D18TypeInfo_Interface6__vtblZ@Base 12
+ _D18TypeInfo_Interface7__ClassZ@Base 12
+ _D18TypeInfo_Invariant6__initZ@Base 12
+ _D18TypeInfo_Invariant6__vtblZ@Base 12
+ _D18TypeInfo_Invariant7__ClassZ@Base 12
+ _D18TypeInfo_xC6Object6__initZ@Base 12
+ _D20TypeInfo_S2rt3aaA2AA6__initZ@Base 12
+ _D20TypeInfo_S6object2AA6__initZ@Base 12
+ _D20TypeInfo_StaticArray6__initZ@Base 12
+ _D20TypeInfo_StaticArray6__vtblZ@Base 12
+ _D20TypeInfo_StaticArray7__ClassZ@Base 12
+ _D22TypeInfo_FNbC6ObjectZv6__initZ@Base 12
+ _D22TypeInfo_S2rt3aaA4Impl6__initZ@Base 12
+ _D23TypeInfo_DFNbC6ObjectZv6__initZ@Base 12
+ _D23TypeInfo_S2rt3aaA5Range6__initZ@Base 12
+ _D24TypeInfo_S2rt3aaA6Bucket6__initZ@Base 12
+ _D24TypeInfo_S2rt5tlsgc4Data6__initZ@Base 12
+ _D24TypeInfo_xDFNbC6ObjectZv6__initZ@Base 12
+ _D25TypeInfo_AssociativeArray6__initZ@Base 12
+ _D25TypeInfo_AssociativeArray6__vtblZ@Base 12
+ _D25TypeInfo_AssociativeArray7__ClassZ@Base 12
+ _D25TypeInfo_AxDFNbC6ObjectZv6__initZ@Base 12
+ _D25TypeInfo_S4core6memory2GC6__initZ@Base 12
+ _D25TypeInfo_S6object7AARange6__initZ@Base 12
+ _D25TypeInfo_xADFNbC6ObjectZv6__initZ@Base 12
+ _D25TypeInfo_xS2rt3aaA6Bucket6__initZ@Base 12
+ _D26TypeInfo_AxS2rt3aaA6Bucket6__initZ@Base 12
+ _D26TypeInfo_S2rt6dmain25CArgs6__initZ@Base 12
+ _D26TypeInfo_xAS2rt3aaA6Bucket6__initZ@Base 12
+ _D27TypeInfo_S4core6int1284Cent6__initZ@Base 12
+ _D27TypeInfo_S6object9Interface6__initZ@Base 12
+ _D28TypeInfo_E2rt3aaA4Impl5Flags6__initZ@Base 12
+ _D28TypeInfo_S2rt8lifetime5Array6__initZ@Base 12
+ _D29TypeInfo_S2rt9profilegc5Entry6__initZ@Base 12
+ _D29TypeInfo_S4core4time8Duration6__initZ@Base 12
+ _D29TypeInfo_S4core5bitop7Split646__initZ@Base 12
+ _D29TypeInfo_S4core7runtime5CArgs6__initZ@Base 12
+ _D29TypeInfo_S6object10ModuleInfo6__initZ@Base 12
+ _D29TypeInfo_xE2rt3aaA4Impl5Flags6__initZ@Base 12
+ _D2rt11arrayassign11__moduleRefZ@Base 12
+ _D2rt11arrayassign12__ModuleInfoZ@Base 12
+ _D2rt3aaA10allocEntryFMxPSQyQx4ImplMxPvZPv@Base 12
+ _D2rt3aaA11__moduleRefZ@Base 12
+ _D2rt3aaA11fakeEntryTIFKSQxQw4ImplxC8TypeInfoxQlZ13tiMangledNameyAa@Base 12
+ _D2rt3aaA11fakeEntryTIFKSQxQw4ImplxC8TypeInfoxQlZC15TypeInfo_Struct@Base 12
+ _D2rt3aaA11rtinfoEntryFKSQxQw4ImplPymQdPmmZPyv@Base 12
+ _D2rt3aaA12__ModuleInfoZ@Base 12
+ _D2rt3aaA12allocBucketsFNaNbNemZASQBgQBg6Bucket@Base 12
+ _D2rt3aaA2AA5emptyMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA2AA6__initZ@Base 12
+ _D2rt3aaA3mixFNaNbNiNfmZm@Base 12
+ _D2rt3aaA4Impl11__xopEqualsMxFKxSQBfQBfQBeZb@Base 12
+ _D2rt3aaA4Impl14findSlotInsertMNgFNaNbNimZPNgSQBsQBs6Bucket@Base 12
+ _D2rt3aaA4Impl14findSlotLookupMNgFmMxPvMxC8TypeInfoZPNgSQCcQCc6Bucket@Base 12
+ _D2rt3aaA4Impl3dimMxFNaNbNdNiNfZm@Base 12
+ _D2rt3aaA4Impl4growMFMxC8TypeInfoZv@Base 12
+ _D2rt3aaA4Impl4maskMxFNaNbNdNiZm@Base 12
+ _D2rt3aaA4Impl5clearMFNaNbZv@Base 12
+ _D2rt3aaA4Impl6__ctorMFNcMxC25TypeInfo_AssociativeArraymZSQCeQCeQCd@Base 12
+ _D2rt3aaA4Impl6__initZ@Base 12
+ _D2rt3aaA4Impl6lengthMxFNaNbNdNiZm@Base 12
+ _D2rt3aaA4Impl6resizeMFNaNbmZv@Base 12
+ _D2rt3aaA4Impl6shrinkMFMxC8TypeInfoZv@Base 12
+ _D2rt3aaA4Impl9__xtoHashFNbNeKxSQBeQBeQBdZm@Base 12
+ _D2rt3aaA5Range6__initZ@Base 12
+ _D2rt3aaA6Bucket5emptyMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA6Bucket6__initZ@Base 12
+ _D2rt3aaA6Bucket6filledMxFNaNbNdNiNfZb@Base 12
+ _D2rt3aaA6Bucket7deletedMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA6talignFNaNbNiNfmmZm@Base 12
+ _D2rt3aaA7hasDtorFxC8TypeInfoZb@Base 12
+ _D2rt3aaA8calcHashFMxPvMxC8TypeInfoZm@Base 12
+ _D2rt3aaA8nextpow2FNaNbNixmZm@Base 12
+ _D2rt3aaA9entryDtorFPvxC15TypeInfo_StructZv@Base 12
+ _D2rt3aaA9getRTInfoFxC8TypeInfoZPyv@Base 12
+ _D2rt3adi11__moduleRefZ@Base 12
+ _D2rt3adi12__ModuleInfoZ@Base 12
+ _D2rt3deh11__moduleRefZ@Base 12
+ _D2rt3deh12__ModuleInfoZ@Base 12
+ _D2rt4util7utility10safeAssertFNbNiNfbMAyaMQemZv@Base 12
+ _D2rt4util7utility11__moduleRefZ@Base 12
+ _D2rt4util7utility12__ModuleInfoZ@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTdZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTdZQByZm@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTeZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTeZQByZm@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTfZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTfZQByZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_c8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_j8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n4swapMxFNaNbNiNfPvQcZv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n6equalsMxFNaNbNiNfIPvIQdZb@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n7compareMxFNaNbNiNfIPvIQdZi@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n7getHashMxFNaNbNiNfMxPvZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n8toStringMxFNaNbNiNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_o8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_p8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_q8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_r8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ac8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Aj8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ao8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ap8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Aq8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ar8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Av4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Av8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo11__moduleRefZ@Base 12
+ _D2rt4util8typeinfo12TypeInfo_Aya8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo12__ModuleInfoZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe11initializerMxFNaNbNeZ1cyG1EQEeQEeQCrQCm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf11initializerMxFNaNbNeZ1cyG1EQEfQEfQCsQCn@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg11initializerMxFNaNbNeZ1cyG1EQEgQEgQCtQCo@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw11initializerMxFNaNbNeZ1cyG1a@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw11initializerMxFNaNbNeZ1cyG1d@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw11initializerMxFNaNbNeZ1cyG1e@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw11initializerMxFNaNbNeZ1cyG1f@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw11initializerMxFNaNbNeZ1cyG1u@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw11initializerMxFNaNbNeZ1cyG1w@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd8toStringMxFNbNfZAya@Base 12
+ _D2rt5cast_11__moduleRefZ@Base 12
+ _D2rt5cast_12__ModuleInfoZ@Base 12
+ _D2rt5cast_18areClassInfosEqualFNaNbNiNfMxC14TypeInfo_ClassMxQtZb@Base 12
+ _D2rt5minfo11ModuleGroup11__xopEqualsMxFKxSQBpQBpQBmZb@Base 12
+ _D2rt5minfo11ModuleGroup11runTlsCtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup11runTlsDtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup12genCyclePathMFmmAAiZAm@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec11__xopEqualsMxFKxSQCsQCsQCpQCfMFQBuZQBuZb@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec3modMFNdZi@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec9__xtoHashFNbNeKxSQCrQCrQCoQCeMFQBtZQBtZm@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZb@Base 12
+ _D2rt5minfo11ModuleGroup4freeMFZv@Base 12
+ _D2rt5minfo11ModuleGroup6__ctorMFNbNcNiAyPS6object10ModuleInfoZSQCkQCkQCh@Base 12
+ _D2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup7modulesMxFNbNdNiZAyPS6object10ModuleInfo@Base 12
+ _D2rt5minfo11ModuleGroup8runCtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup8runDtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup9__xtoHashFNbNeKxSQBoQBoQBlZm@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFAyaZ8findDepsMFmPmZ10stackFrame6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFAyaZv@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFZv@Base 12
+ _D2rt5minfo11__moduleRefZ@Base 12
+ _D2rt5minfo12__ModuleInfoZ@Base 12
+ _D2rt5minfo17moduleinfos_applyFMDFyPS6object10ModuleInfoZiZi@Base 12
+ _D2rt5tlsgc11__moduleRefZ@Base 12
+ _D2rt5tlsgc12__ModuleInfoZ@Base 12
+ _D2rt5tlsgc14processGCMarksFNbPvMDFNbQhZiZv@Base 12
+ _D2rt5tlsgc4Data6__initZ@Base 12
+ _D2rt5tlsgc4initFNbNiZPv@Base 12
+ _D2rt5tlsgc4scanFNbPvMDFNbQhQjZvZv@Base 12
+ _D2rt5tlsgc7destroyFNbNiPvZv@Base 12
+ _D2rt6aApply11__moduleRefZ@Base 12
+ _D2rt6aApply12__ModuleInfoZ@Base 12
+ _D2rt6config11__moduleRefZ@Base 12
+ _D2rt6config12__ModuleInfoZ@Base 12
+ _D2rt6config13rt_linkOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6config15rt_configOptionFNbNiAyaMDFNbNiQkZQnbZQr@Base 12
+ _D2rt6config16rt_cmdlineOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6config16rt_envvarsOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6dmain210_initCountOm@Base 12
+ _D2rt6dmain211__moduleRefZ@Base 12
+ _D2rt6dmain212__ModuleInfoZ@Base 12
+ _D2rt6dmain212traceHandlerPFPvZC6object9Throwable9TraceInfo@Base 12
+ _D2rt6dmain214UnitTestResult6__initZ@Base 12
+ _D2rt6dmain215formatThrowableFC6object9ThrowableMDFNbIAaZvZv@Base 12
+ _D2rt6dmain221parseExceptionOptionsFNbNiZb@Base 12
+ _D2rt6dmain25CArgs6__initZ@Base 12
+ _D2rt6dmain26_cArgsSQsQr5CArgs@Base 12
+ _D2rt6dmain27_d_argsAAya@Base 12
+ _D2rt6memory11__moduleRefZ@Base 12
+ _D2rt6memory12__ModuleInfoZ@Base 12
+ _D2rt6memory16initStaticDataGCFZv@Base 12
+ _D2rt7aApplyR11__moduleRefZ@Base 12
+ _D2rt7aApplyR12__ModuleInfoZ@Base 12
+ _D2rt7ehalloc11__moduleRefZ@Base 12
+ _D2rt7ehalloc12__ModuleInfoZ@Base 12
+ _D2rt8arraycat11__moduleRefZ@Base 12
+ _D2rt8arraycat12__ModuleInfoZ@Base 12
+ _D2rt8lifetime10__arrayPadFNaNbNemxC8TypeInfoZm@Base 12
+ _D2rt8lifetime10__blkcacheFNbNdZPS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime11__moduleRefZ@Base 12
+ _D2rt8lifetime11hasPostblitFIC8TypeInfoZb@Base 12
+ _D2rt8lifetime11newCapacityFmmZm@Base 12
+ _D2rt8lifetime12__ModuleInfoZ@Base 12
+ _D2rt8lifetime12__arrayAllocFNaNbmMxC8TypeInfoxQlZS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime12__arrayAllocFmKS4core6memory8BlkInfo_MxC8TypeInfoxQlZQBm@Base 12
+ _D2rt8lifetime12__arrayStartFNaNbNkMS4core6memory8BlkInfo_ZPv@Base 12
+ _D2rt8lifetime12__doPostblitFPvmxC8TypeInfoZv@Base 12
+ _D2rt8lifetime12__getBlkInfoFNbPvZPS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime12__nextBlkIdxi@Base 12
+ _D2rt8lifetime14collectHandlerPFC6ObjectZb@Base 12
+ _D2rt8lifetime14finalize_arrayFPvmxC15TypeInfo_StructZv@Base 12
+ _D2rt8lifetime14processGCMarksFNbPS4core6memory8BlkInfo_MDFNbPvZiZv@Base 12
+ _D2rt8lifetime15__arrayClearPadFNaNbKS4core6memory8BlkInfo_mmZv@Base 12
+ _D2rt8lifetime15finalize_array2FNbPvmZv@Base 12
+ _D2rt8lifetime15finalize_structFNbPvmZv@Base 12
+ _D2rt8lifetime18__arrayAllocLengthFNaNbKS4core6memory8BlkInfo_xC8TypeInfoZm@Base 12
+ _D2rt8lifetime18__blkcache_storagePS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime18structTypeInfoSizeFNaNbNixC8TypeInfoZm@Base 12
+ _D2rt8lifetime19_d_arraysetlengthiTUxC8TypeInfomPAvZ12doInitializeFNaNbNiPvQcxAvZv@Base 12
+ _D2rt8lifetime19_staticDtor_L503_C1FZv@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock6__initZ@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock6__vtblZ@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock7__ClassZ@Base 12
+ _D2rt8lifetime20__insertBlkInfoCacheFNbS4core6memory8BlkInfo_PQxZv@Base 12
+ _D2rt8lifetime21__setArrayAllocLengthFNaNbKS4core6memory8BlkInfo_mbxC8TypeInfomZb@Base 12
+ _D2rt8lifetime26hasArrayFinalizerInSegmentFNbPvmIAvZi@Base 12
+ _D2rt8lifetime27hasStructFinalizerInSegmentFNbPvmIAvZi@Base 12
+ _D2rt8lifetime5Array6__initZ@Base 12
+ _D2rt8lifetime9unqualifyFNaNbNiNkMNgC8TypeInfoZNgQn@Base 12
+ _D2rt8lifetime__T14_d_newarrayOpTX12_d_newarrayTZQBgFNaNbxC8TypeInfoAmZAv@Base 12
+ _D2rt8lifetime__T14_d_newarrayOpTX13_d_newarrayiTZQBhFNaNbxC8TypeInfoAmZAv@Base 12
+ _D2rt8monitor_10getMonitorFNaNbNiC6ObjectZPOSQBrQBr7Monitor@Base 12
+ _D2rt8monitor_10setMonitorFNaNbNiC6ObjectPOSQBqQBq7MonitorZv@Base 12
+ _D2rt8monitor_11__moduleRefZ@Base 12
+ _D2rt8monitor_11unlockMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_12__ModuleInfoZ@Base 12
+ _D2rt8monitor_12destroyMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_12disposeEventFNbPSQBfQBf7MonitorC6ObjectZv@Base 12
+ _D2rt8monitor_13deleteMonitorFNbNiPSQBiQBi7MonitorZv@Base 12
+ _D2rt8monitor_13ensureMonitorFNbC6ObjectZPOSQBqQBq7Monitor@Base 12
+ _D2rt8monitor_4gmtxS4core3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D2rt8monitor_5gattrS4core3sys5posixQk5types19pthread_mutexattr_t@Base 12
+ _D2rt8monitor_7Monitor11__xopEqualsMxFKxSQBnQBnQBhZb@Base 12
+ _D2rt8monitor_7Monitor6__initZ@Base 12
+ _D2rt8monitor_7Monitor9__xtoHashFNbNeKxSQBmQBmQBgZm@Base 12
+ _D2rt8monitor_7monitorFNaNbNcNdNiNkMC6ObjectZOPSQBuQBu7Monitor@Base 12
+ _D2rt8monitor_9initMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_9lockMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8sections11__moduleRefZ@Base 12
+ _D2rt8sections12__ModuleInfoZ@Base 12
+ _D2rt8sections20scanDataSegPreciselyFNbNiZ3errC6object5Error@Base 12
+ _D2rt8sections20scanDataSegPreciselyFNbNiZb@Base 12
+ _D2rt9critical_11__moduleRefZ@Base 12
+ _D2rt9critical_11ensureMutexFNbPOSQBgQBg18D_CRITICAL_SECTIONZv@Base 12
+ _D2rt9critical_12__ModuleInfoZ@Base 12
+ _D2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D2rt9critical_3gcsOSQtQs18D_CRITICAL_SECTION@Base 12
+ _D2rt9critical_4headOPSQvQu18D_CRITICAL_SECTION@Base 12
+ _D2rt9profilegc10accumulateFNbNiAyakQeQgmZv@Base 12
+ _D2rt9profilegc11__moduleRefZ@Base 12
+ _D2rt9profilegc11logfilenameAya@Base 12
+ _D2rt9profilegc12__ModuleInfoZ@Base 12
+ _D2rt9profilegc15globalNewCountsS4core8internal9container7hashtab__T7HashTabTAxaTSQDcQDc5EntryZQBb@Base 12
+ _D2rt9profilegc18_staticDtor_L93_C1FZ11__critsec19OPv@Base 12
+ _D2rt9profilegc18_staticDtor_L93_C1FZv@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result11__xopEqualsMxFKxSQCqQCqQCjFZQBlZb@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result6__initZ@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result9__xtoHashFNbNeKxSQCpQCpQCiFZQBkZm@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result9qsort_cmpUNbNiMxPvMxQeZi@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZv@Base 12
+ _D2rt9profilegc5Entry6__initZ@Base 12
+ _D2rt9profilegc6bufferAa@Base 12
+ _D2rt9profilegc9newCountsS4core8internal9container7hashtab__T7HashTabTAxaTSQCvQCv5EntryZQBb@Base 12
+ _D30TypeInfo_E4core4time9ClockType6__initZ@Base 12
+ _D30TypeInfo_S2rt8monitor_7Monitor6__initZ@Base 12
+ _D30TypeInfo_S4core5bitop8BitRange6__initZ@Base 12
+ _D30TypeInfo_xS2rt9profilegc5Entry6__initZ@Base 12
+ _D30TypeInfo_yS6object10ModuleInfo6__initZ@Base 12
+ _D31TypeInfo_C3gcc3deh11CxxTypeInfo6__initZ@Base 12
+ _D31TypeInfo_PyS6object10ModuleInfo6__initZ@Base 12
+ _D31TypeInfo_S3gcc8sections3elf3DSO6__initZ@Base 12
+ _D31TypeInfo_S4core5cpuid9CacheInfo6__initZ@Base 12
+ _D31TypeInfo_S4core6memory2GC5Stats6__initZ@Base 12
+ _D31TypeInfo_S4core6memory8BlkInfo_6__initZ@Base 12
+ _D31TypeInfo_S4core7runtime7Runtime6__initZ@Base 12
+ _D31TypeInfo_yPS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_AyPS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_C6object6Object7Monitor6__initZ@Base 12
+ _D32TypeInfo_S2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D32TypeInfo_S4core2gc6config6Config6__initZ@Base 12
+ _D32TypeInfo_S4core4stdc4fenv6fenv_t6__initZ@Base 12
+ _D32TypeInfo_S4core4sync5event5Event6__initZ@Base 12
+ _D32TypeInfo_S4core8demangle7NoHooks6__initZ@Base 12
+ _D32TypeInfo_S6object13__va_list_tag6__initZ@Base 12
+ _D32TypeInfo_xPyS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_xS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D32TypeInfo_yS4core5cpuid9CacheInfo6__initZ@Base 12
+ _D33TypeInfo_AxPyS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_E4core6memory2GC7BlkAttr6__initZ@Base 12
+ _D33TypeInfo_E4core9attribute7mustuse6__initZ@Base 12
+ _D33TypeInfo_PxS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D33TypeInfo_S4core2gc8registry5Entry6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc5stdio6fpos_t6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc6locale5lconv6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc6stdlib5div_t6__initZ@Base 12
+ _D33TypeInfo_S6object14OffsetTypeInfo6__initZ@Base 12
+ _D33TypeInfo_xAPyS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_xAyPS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_xC6object6Object7Monitor6__initZ@Base 12
+ _D33TypeInfo_xPS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D33TypeInfo_xS2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D33TypeInfo_xS4core4sync5event5Event6__initZ@Base 12
+ _D33TypeInfo_xS4core8demangle7NoHooks6__initZ@Base 12
+ _D34TypeInfo_C4core2gc11gcinterface2GC6__initZ@Base 12
+ _D34TypeInfo_E3gcc6config11ThreadModel6__initZ@Base 12
+ _D34TypeInfo_S4core3sys5posix6direntQh6__initZ@Base 12
+ _D34TypeInfo_S4core4stdc6stdlib6ldiv_t6__initZ@Base 12
+ _D34TypeInfo_S4core4time12TickDuration6__initZ@Base 12
+ _D34TypeInfo_S4core5cpuid11CpuFeatures6__initZ@Base 12
+ _D35TypeInfo_E4core6atomic11MemoryOrder6__initZ@Base 12
+ _D35TypeInfo_S3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5linux7ifaddrsQi6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix3aio5aiocb6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix3grp5group6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix7termiosQi6__initZ@Base 12
+ _D35TypeInfo_S4core4stdc5stdio8_IO_FILE6__initZ@Base 12
+ _D35TypeInfo_S4core4stdc6stdlib7lldiv_t6__initZ@Base 12
+ _D35TypeInfo_S4core9attribute9gnuAbiTag6__initZ@Base 12
+ _D36TypeInfo_E4core4stdc6config8__c_long6__initZ@Base 12
+ _D36TypeInfo_FZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D36TypeInfo_S2rt6dmain214UnitTestResult6__initZ@Base 12
+ _D36TypeInfo_S3gcc9backtrace10SymbolInfo6__initZ@Base 12
+ _D36TypeInfo_S4core2gc11gcinterface4Root6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5linux2fs7fsxattr6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5posix3pwd6passwd6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D36TypeInfo_xS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D37TypeInfo_C6object9Throwable9TraceInfo6__initZ@Base 12
+ _D37TypeInfo_E4core4stdc6config9__c_ulong6__initZ@Base 12
+ _D37TypeInfo_PFZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D37TypeInfo_PxS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D37TypeInfo_S3gcc8sections3elf9ThreadDSO6__initZ@Base 12
+ _D37TypeInfo_S3gcc8sections3elf9tls_index6__initZ@Base 12
+ _D37TypeInfo_S4core2gc11gcinterface5Range6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix3aio7aiocb646__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix4poll6pollfd6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix5fcntl5flock6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posixQk3uio5iovec6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posixQk7utsnameQi6__initZ@Base 12
+ _D37TypeInfo_S4core4stdc6wchar_9mbstate_t6__initZ@Base 12
+ _D37TypeInfo_S4core6stdcpp4new_9nothrow_t6__initZ@Base 12
+ _D37TypeInfo_xPS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D38TypeInfo_S3gcc3deh18CxaExceptionHeader6__initZ@Base 12
+ _D38TypeInfo_S4core2gc6config11PrettyBytes6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5linux4link7r_debug6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix4stdc4time2tm6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix5netdb6netent6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix6locale5lconv6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posixQk3msg6msgbuf6__initZ@Base 12
+ _D38TypeInfo_S4core8internal7convert5Float6__initZ@Base 12
+ _D38TypeInfo_xPFZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D38TypeInfo_xS4core2gc11gcinterface5Range6__initZ@Base 12
+ _D39TypeInfo_AC4core6thread8osthread6Thread6__initZ@Base 12
+ _D39TypeInfo_S3gcc9backtrace13SymbolOrError6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Dyn6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Lib6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Rel6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Sym6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Dyn6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Lib6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Rel6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Sym6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux4link8link_map6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5dlfcn7Dl_info6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5netdb7hostent6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5netdb7servent6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5utime7utimbuf6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix6signal6sigval6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posixQk3msg7msginfo6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posixQk4stat6stat_t6__initZ@Base 12
+ _D39TypeInfo_S4core4stdc8inttypes9imaxdiv_t6__initZ@Base 12
+ _D39TypeInfo_S4core6memory2GC12ProfileStats6__initZ@Base 12
+ _D39TypeInfo_S4core6thread7context8Callable6__initZ@Base 12
+ _D39TypeInfo_S4core7runtime14UnitTestResult6__initZ@Base 12
+ _D39TypeInfo_S4core8internal2gc4bits6GCBits6__initZ@Base 12
+ _D3gcc10attributes11__moduleRefZ@Base 12
+ _D3gcc10attributes12__ModuleInfoZ@Base 12
+ _D3gcc12libbacktrace11__moduleRefZ@Base 12
+ _D3gcc12libbacktrace12__ModuleInfoZ@Base 12
+ _D3gcc12libbacktrace15backtrace_state6__initZ@Base 12
+ _D3gcc3deh11CxxTypeInfo11__InterfaceZ@Base 12
+ _D3gcc3deh11__moduleRefZ@Base 12
+ _D3gcc3deh12__ModuleInfoZ@Base 12
+ _D3gcc3deh12getClassInfoFNiPSQBb6unwind7generic17_Unwind_ExceptionPxhZC14TypeInfo_Class@Base 12
+ _D3gcc3deh15ExceptionHeader11__xopEqualsMxFKxSQBsQBrQBqZb@Base 12
+ _D3gcc3deh15ExceptionHeader17toExceptionHeaderFNiPSQBx6unwind7generic17_Unwind_ExceptionZPSQDlQDkQDj@Base 12
+ _D3gcc3deh15ExceptionHeader3popFNiZPSQBjQBiQBh@Base 12
+ _D3gcc3deh15ExceptionHeader4freeFNiPSQBjQBiQBhZv@Base 12
+ _D3gcc3deh15ExceptionHeader4pushMFNiZv@Base 12
+ _D3gcc3deh15ExceptionHeader4saveFNiPSQBj6unwind7generic17_Unwind_ExceptionmiPxhmZv@Base 12
+ _D3gcc3deh15ExceptionHeader5stackPSQBhQBgQBf@Base 12
+ _D3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D3gcc3deh15ExceptionHeader6createFNiC6object9ThrowableZPSQCeQCdQCc@Base 12
+ _D3gcc3deh15ExceptionHeader7restoreFNiPSQBm6unwind7generic17_Unwind_ExceptionJiJPxhJmJmZv@Base 12
+ _D3gcc3deh15ExceptionHeader9__xtoHashFNbNeKxSQBrQBqQBpZm@Base 12
+ _D3gcc3deh15ExceptionHeader9ehstorageSQBkQBjQBi@Base 12
+ _D3gcc3deh17__gdc_personalityFimPSQBg6unwind7generic17_Unwind_ExceptionPSQCtQBnQBj15_Unwind_ContextZk@Base 12
+ _D3gcc3deh17actionTableLookupFiPSQBf6unwind7generic17_Unwind_ExceptionPxhQdmmQhhJbJbZi@Base 12
+ _D3gcc3deh18CONTINUE_UNWINDINGFPSQBf6unwind7generic17_Unwind_ExceptionPSQCsQBnQBj15_Unwind_ContextZk@Base 12
+ _D3gcc3deh18CxaExceptionHeader14getAdjustedPtrFPSQBv6unwind7generic17_Unwind_ExceptionCQDhQDg11CxxTypeInfoZPv@Base 12
+ _D3gcc3deh18CxaExceptionHeader17toExceptionHeaderFNiPSQCa6unwind7generic17_Unwind_ExceptionZPSQDoQDnQDm@Base 12
+ _D3gcc3deh18CxaExceptionHeader4saveFNiPSQBm6unwind7generic17_Unwind_ExceptionPvZv@Base 12
+ _D3gcc3deh18CxaExceptionHeader6__initZ@Base 12
+ _D3gcc3deh19isGdcExceptionClassFNimZb@Base 12
+ _D3gcc3deh19isGxxExceptionClassFNimZb@Base 12
+ _D3gcc3deh20isDependentExceptionFNimZb@Base 12
+ _D3gcc3deh8_d_throwUC6object9ThrowableZ17exception_cleanupUNikPSQCk6unwind7generic17_Unwind_ExceptionZv@Base 12
+ _D3gcc3deh8scanLSDAFPxhmiPSQz6unwind7generic17_Unwind_ExceptionPSQClQBnQBj15_Unwind_ContextmJmJiZk@Base 12
+ _D3gcc3deh9terminateFNiAyakZ11terminatingb@Base 12
+ _D3gcc3deh9terminateFNiAyakZv@Base 12
+ _D3gcc6config11__moduleRefZ@Base 12
+ _D3gcc6config12__ModuleInfoZ@Base 12
+ _D3gcc6emutls11__moduleRefZ@Base 12
+ _D3gcc6emutls12__ModuleInfoZ@Base 12
+ _D3gcc6unwind10arm_common11__moduleRefZ@Base 12
+ _D3gcc6unwind10arm_common12__ModuleInfoZ@Base 12
+ _D3gcc6unwind11__moduleRefZ@Base 12
+ _D3gcc6unwind12__ModuleInfoZ@Base 12
+ _D3gcc6unwind2pe11__moduleRefZ@Base 12
+ _D3gcc6unwind2pe12__ModuleInfoZ@Base 12
+ _D3gcc6unwind2pe12read_sleb128FNiKPxhZl@Base 12
+ _D3gcc6unwind2pe12read_uleb128FNiKPxhZm@Base 12
+ _D3gcc6unwind2pe18read_encoded_valueFNiPSQBnQBm7generic15_Unwind_ContexthKPxhZm@Base 12
+ _D3gcc6unwind2pe21base_of_encoded_valueFNihPSQBrQBq7generic15_Unwind_ContextZm@Base 12
+ _D3gcc6unwind2pe21size_of_encoded_valueFNihZk@Base 12
+ _D3gcc6unwind2pe28read_encoded_value_with_baseFNihmKPxhZm@Base 12
+ _D3gcc6unwind3arm11__moduleRefZ@Base 12
+ _D3gcc6unwind3arm12__ModuleInfoZ@Base 12
+ _D3gcc6unwind3c6x11__moduleRefZ@Base 12
+ _D3gcc6unwind3c6x12__ModuleInfoZ@Base 12
+ _D3gcc6unwind7generic11__moduleRefZ@Base 12
+ _D3gcc6unwind7generic12__ModuleInfoZ@Base 12
+ _D3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D3gcc7gthread11__moduleRefZ@Base 12
+ _D3gcc7gthread12__ModuleInfoZ@Base 12
+ _D3gcc7gthread18__gthread_active_pFNbNiZi@Base 12
+ _D3gcc8builtins11__moduleRefZ@Base 12
+ _D3gcc8builtins12__ModuleInfoZ@Base 12
+ _D3gcc8builtins13__va_list_tag6__initZ@Base 12
+ _D3gcc8sections11__moduleRefZ@Base 12
+ _D3gcc8sections12__ModuleInfoZ@Base 12
+ _D3gcc8sections18pinLoadedLibrariesFNbNiZPv@Base 12
+ _D3gcc8sections20unpinLoadedLibrariesFNbNiPvZv@Base 12
+ _D3gcc8sections22cleanupLoadedLibrariesFNbNiZv@Base 12
+ _D3gcc8sections22inheritLoadedLibrariesFNbNiPvZv@Base 12
+ _D3gcc8sections3elf10_rtLoadingb@Base 12
+ _D3gcc8sections3elf11__moduleRefZ@Base 12
+ _D3gcc8sections3elf11_loadedDSOsFNbNcNdNiZ1xS4core8internal9container5array__T5ArrayTSQDgQDfQCz9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf11_loadedDSOsFNbNcNdNiZS4core8internal9container5array__T5ArrayTSQDeQDdQCx9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf11getTLSRangeFNbNimmZAv@Base 12
+ _D3gcc8sections3elf12__ModuleInfoZ@Base 12
+ _D3gcc8sections3elf12_handleToDSOFNbNcNdNiZ1xS4core8internal9container7hashtab__T7HashTabTPvTPSQDpQDoQDi3DSOZQBc@Base 12
+ _D3gcc8sections3elf12_handleToDSOFNbNcNdNiZS4core8internal9container7hashtab__T7HashTabTPvTPSQDnQDmQDg3DSOZQBc@Base 12
+ _D3gcc8sections3elf12decThreadRefFPSQBiQBhQBb3DSObZv@Base 12
+ _D3gcc8sections3elf12dsoForHandleFNbNiPvZPSQBpQBoQBi3DSO@Base 12
+ _D3gcc8sections3elf12finiSectionsFNbNiZv@Base 12
+ _D3gcc8sections3elf12incThreadRefFPSQBiQBhQBb3DSObZv@Base 12
+ _D3gcc8sections3elf12initSectionsFNbNiZv@Base 12
+ _D3gcc8sections3elf12scanSegmentsFNbNiIKS4core3sys5linux4link12dl_phdr_infoPSQCxQCwQCq3DSOZv@Base 12
+ _D3gcc8sections3elf13findThreadDSOFNbNiPSQBnQBmQBg3DSOZPSQCdQCcQBw9ThreadDSO@Base 12
+ _D3gcc8sections3elf13finiTLSRangesFNbNiPS4core8internal9container5array__T5ArrayTSQDcQDbQCv9ThreadDSOZQBcZv@Base 12
+ _D3gcc8sections3elf13handleForAddrFNbNiPvZQd@Base 12
+ _D3gcc8sections3elf13handleForNameFNbNixPaZPv@Base 12
+ _D3gcc8sections3elf13initTLSRangesFNbNiZPS4core8internal9container5array__T5ArrayTSQDdQDcQCw9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf13runFinalizersFPSQBjQBiQBc3DSOZv@Base 12
+ _D3gcc8sections3elf13scanTLSRangesFNbPS4core8internal9container5array__T5ArrayTSQDaQCzQCt9ThreadDSOZQBcMDFNbPvQcZvZv@Base 12
+ _D3gcc8sections3elf15CompilerDSOData6__initZ@Base 12
+ _D3gcc8sections3elf15getDependenciesFNbNiIKS4core3sys5linux4link12dl_phdr_infoKSQBk8internal9container5array__T5ArrayTPSQEoQEnQEh3DSOZQxZv@Base 12
+ _D3gcc8sections3elf15setDSOForHandleFNbNiPSQBpQBoQBi3DSOPvZv@Base 12
+ _D3gcc8sections3elf16linkMapForHandleFNbNiPvZPS4core3sys5linux4link8link_map@Base 12
+ _D3gcc8sections3elf16registerGCRangesFNbNiPSQBqQBpQBj3DSOZv@Base 12
+ _D3gcc8sections3elf17_handleToDSOMutexS4core3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D3gcc8sections3elf17unsetDSOForHandleFNbNiPSQBrQBqQBk3DSOPvZv@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ2DG6__initZ@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ8callbackUNbNiQBzmPvZi@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZb@Base 12
+ _D3gcc8sections3elf18findSegmentForAddrFNbNiIKS4core3sys5linux4link12dl_phdr_infoIPvPSQBnQBlQBkQDc10Elf64_PhdrZb@Base 12
+ _D3gcc8sections3elf18unregisterGCRangesFNbNiPSQBsQBrQBl3DSOZv@Base 12
+ _D3gcc8sections3elf20runModuleDestructorsFPSQBqQBpQBj3DSObZv@Base 12
+ _D3gcc8sections3elf21_isRuntimeInitializedb@Base 12
+ _D3gcc8sections3elf21runModuleConstructorsFPSQBrQBqQBk3DSObZv@Base 12
+ _D3gcc8sections3elf3DSO11__fieldDtorMFNbNiZv@Base 12
+ _D3gcc8sections3elf3DSO11__invariantMxFZv@Base 12
+ _D3gcc8sections3elf3DSO11__xopEqualsMxFKxSQBoQBnQBhQBgZb@Base 12
+ _D3gcc8sections3elf3DSO11moduleGroupMNgFNbNcNdNiNjZNgS2rt5minfo11ModuleGroup@Base 12
+ _D3gcc8sections3elf3DSO12__invariant0MxFZv@Base 12
+ _D3gcc8sections3elf3DSO14opApplyReverseFMDFKSQBrQBqQBkQBjZiZi@Base 12
+ _D3gcc8sections3elf3DSO6__initZ@Base 12
+ _D3gcc8sections3elf3DSO7modulesMxFNbNdNiZAyPS6object10ModuleInfo@Base 12
+ _D3gcc8sections3elf3DSO7opApplyFMDFKSQBjQBiQBcQBbZiZi@Base 12
+ _D3gcc8sections3elf3DSO8gcRangesMNgFNbNdNiZANgAv@Base 12
+ _D3gcc8sections3elf3DSO8opAssignMFNbNcNiNjSQBpQBoQBiQBhZQo@Base 12
+ _D3gcc8sections3elf3DSO8tlsRangeMxFNbNiZAv@Base 12
+ _D3gcc8sections3elf3DSO9__xtoHashFNbNeKxSQBnQBmQBgQBfZm@Base 12
+ _D3gcc8sections3elf7freeDSOFNbNiPSQBgQBfQz3DSOZv@Base 12
+ _D3gcc8sections3elf9ThreadDSO11__xopEqualsMxFKxSQBuQBtQBnQBmZb@Base 12
+ _D3gcc8sections3elf9ThreadDSO14updateTLSRangeMFNbNiZv@Base 12
+ _D3gcc8sections3elf9ThreadDSO6__initZ@Base 12
+ _D3gcc8sections3elf9ThreadDSO9__xtoHashFNbNeKxSQBtQBsQBmQBlZm@Base 12
+ _D3gcc8sections3elf9finiLocksFNbNiZv@Base 12
+ _D3gcc8sections3elf9initLocksFNbNiZv@Base 12
+ _D3gcc8sections3elf9sizeOfTLSFNbNiZm@Base 12
+ _D3gcc8sections3elf9tls_index6__initZ@Base 12
+ _D3gcc8sections3elf__T7toRangeTyPS6object10ModuleInfoZQBgFNaNbNiPyQBiQfZAyQBq@Base 12
+ _D3gcc8sections5macho11__moduleRefZ@Base 12
+ _D3gcc8sections5macho12__ModuleInfoZ@Base 12
+ _D3gcc8sections6common10safeAssertFNbNiNfbMAyaMQemZv@Base 12
+ _D3gcc8sections6common11__moduleRefZ@Base 12
+ _D3gcc8sections6common12__ModuleInfoZ@Base 12
+ _D3gcc8sections6pecoff11__moduleRefZ@Base 12
+ _D3gcc8sections6pecoff12__ModuleInfoZ@Base 12
+ _D3gcc9attribute11__moduleRefZ@Base 12
+ _D3gcc9attribute12__ModuleInfoZ@Base 12
+ _D3gcc9backtrace10SymbolInfo6__initZ@Base 12
+ _D3gcc9backtrace10formatLineFxSQBdQBc10SymbolInfoNkKG1536aZAa@Base 12
+ _D3gcc9backtrace11__moduleRefZ@Base 12
+ _D3gcc9backtrace12LibBacktrace11initializedb@Base 12
+ _D3gcc9backtrace12LibBacktrace16initLibBacktraceFZv@Base 12
+ _D3gcc9backtrace12LibBacktrace5statePSQBk12libbacktrace15backtrace_state@Base 12
+ _D3gcc9backtrace12LibBacktrace6__ctorMFiZCQBoQBnQBg@Base 12
+ _D3gcc9backtrace12LibBacktrace6__initZ@Base 12
+ _D3gcc9backtrace12LibBacktrace6__vtblZ@Base 12
+ _D3gcc9backtrace12LibBacktrace7__ClassZ@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKSQBuQBt13SymbolOrErrorZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKxAaZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace8toStringMxFZAya@Base 12
+ _D3gcc9backtrace12__ModuleInfoZ@Base 12
+ _D3gcc9backtrace13SymbolOrError6__initZ@Base 12
+ _D3gcc9backtrace18SymbolCallbackInfo5resetMFZv@Base 12
+ _D3gcc9backtrace18SymbolCallbackInfo6__initZ@Base 12
+ _D3gcc9backtrace19SymbolCallbackInfo26__initZ@Base 12
+ _D40TypeInfo_E4core6stdcpp4new_11align_val_t6__initZ@Base 12
+ _D40TypeInfo_E4core6thread5fiber5Fiber5State6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5linux4tipc9tipc_name6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix5netdb8addrinfo6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix5netdb8protoent6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix6mqueue7mq_attr6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix6signal7stack_t6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3ipc8ipc_perm6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3msg8msqid_ds6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3shm8shmid_ds6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk4time7timeval6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk5ioctl6termio6__initZ@Base 12
+ _D41TypeInfo_E4core3sys5posixQk4wait8idtype_t6__initZ@Base 12
+ _D41TypeInfo_E4core3sys5posixQk7statvfs5FFlag6__initZ@Base 12
+ _D41TypeInfo_E4core4stdc6config12__c_longlong6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Ehdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Move6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Nhdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Phdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Rela6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Shdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Ehdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Move6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Nhdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Phdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Rela6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Shdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux5sched9cpu_set_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigevent6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigset_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigstack6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8timespec6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix9semaphore5sem_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk5ioctl7winsize6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6select6fd_set6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6socket6linger6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6socket6msghdr6__initZ@Base 12
+ _D42TypeInfo_E4core4stdc6config13__c_ulonglong6__initZ@Base 12
+ _D42TypeInfo_E4core6thread5fiber5Fiber7Rethrow6__initZ@Base 12
+ _D42TypeInfo_HC4core6thread8osthread6ThreadQBd6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux2fs12fstrim_range6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux3elf11Elf32_gptab6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux3elf11Elf_Options6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux4tipc10tipc_event6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posix4time10itimerspec6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posix6signal9siginfo_t6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk4time9itimerval6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk5ioctl8termios26__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk6socket7cmsghdr6__initZ@Base 12
+ _D42TypeInfo_S4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D43TypeInfo_E4core6thread10threadbase8IsMarked6__initZ@Base 12
+ _D43TypeInfo_E4core6thread10threadbase8ScanType6__initZ@Base 12
+ _D43TypeInfo_E4core8internal2gc2os11ChildStatus6__initZ@Base 12
+ _D43TypeInfo_OS4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux2fs13inodes_stat_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf32_Verdef6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf32_auxv_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf64_Verdef6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf64_auxv_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux4tipc11tipc_portid6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux4tipc11tipc_subscr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux5dlfcn10Dl_serinfo6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux5dlfcn10Dl_serpath6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posix4arpa4inet7in_addr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk2un11sockaddr_un6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk6socket8sockaddr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk8resource6rlimit6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk8resource6rusage6__initZ@Base 12
+ _D43TypeInfo_S4core6thread5types13ll_ThreadData6__initZ@Base 12
+ _D44TypeInfo_E2rt4util7utility16__c_complex_real6__initZ@Base 12
+ _D44TypeInfo_E4core6thread7context8Callable4Call6__initZ@Base 12
+ _D44TypeInfo_OS2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D44TypeInfo_S3gcc8sections3elf15CompilerDSOData6__initZ@Base 12
+ _D44TypeInfo_S3gcc9backtrace18SymbolCallbackInfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_RegInfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Syminfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Verdaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Vernaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Verneed6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Syminfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Verdaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Vernaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Verneed6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux4link12dl_phdr_info6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux5epoll11epoll_event6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linuxQk7sysinfo8sysinfo_6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5posix5sched11sched_param6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTdZQm6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTeZQm6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTfZQm6__initZ@Base 12
+ _D44TypeInfo_S4core6thread7context12StackContext6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D45TypeInfo_E2rt4util7utility17__c_complex_float6__initZ@Base 12
+ _D45TypeInfo_E4core4stdc6config16__c_complex_real6__initZ@Base 12
+ _D45TypeInfo_E4core8internal7convert11FloatFormat6__initZ@Base 12
+ _D45TypeInfo_E6object14TypeInfo_Class10ClassFlags6__initZ@Base 12
+ _D45TypeInfo_S3gcc12libbacktrace15backtrace_state6__initZ@Base 12
+ _D45TypeInfo_S3gcc9backtrace19SymbolCallbackInfo26__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf14Elf_Options_Hw6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf9Elf32_Dyn5_d_un6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf9Elf64_Dyn5_d_un6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux4tipc13tipc_name_seq6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux5epoll12epoll_data_t6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5posix6signal11sigaction_t6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5posixQk7statvfs9statvfs_t6__initZ@Base 12
+ _D45TypeInfo_S4core8internal12parseoptions6MemVal6__initZ@Base 12
+ _D45TypeInfo_S4core8internal9container5treap4Rand6__initZ@Base 12
+ _D46TypeInfo_E2rt4util7utility18__c_complex_double6__initZ@Base 12
+ _D46TypeInfo_E4core4stdc6config17__c_complex_float6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5linux2fs16file_clone_range6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix7netinet3in_8in6_addr6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix8ucontext10mcontext_t6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix8ucontext10ucontext_t6__initZ@Base 12
+ _D46TypeInfo_S4core4stdc6wchar_9mbstate_t8___value6__initZ@Base 12
+ _D46TypeInfo_S4core4sync5mutex5Mutex12MonitorProxy6__initZ@Base 12
+ _D47TypeInfo_AC4core6thread10threadbase10ThreadBase6__initZ@Base 12
+ _D47TypeInfo_E4core4stdc6config18__c_complex_double6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp6string16DefaultConstruct6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp6vector16DefaultConstruct6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp8xutility14CppStdRevision6__initZ@Base 12
+ _D47TypeInfo_E6object15TypeInfo_Struct11StructFlags6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linux2fs17file_dedupe_range6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linux2fs17files_stat_struct6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linuxQk5prctl12prctl_mm_map6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5posix6setjmp13__jmp_buf_tag6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5posix7netinet3in_9ipv6_mreq6__initZ@Base 12
+ _D47TypeInfo_S4core6thread8osthread6Thread8Priority6__initZ@Base 12
+ _D47TypeInfo_S6object15TypeInfo_Struct11_memberFunc6__initZ@Base 12
+ _D48TypeInfo_S3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5linux8io_uring12io_uring_cqe6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5linux8io_uring12io_uring_sqe6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5posix8ucontext12_libc_fpxreg6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5posix8ucontext12_libc_xmmreg6__initZ@Base 12
+ _D49TypeInfo_E4core3sys5linux10perf_event10perf_hw_id6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5linux3elf12Elf32_auxv_t5_a_un6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5linux3elf12Elf64_auxv_t5_a_un6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posix3net3if_14if_nameindex_t6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posix8ucontext13_libc_fpstate6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posixQk5types14pthread_attr_t6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posixQk5types14pthread_cond_t6__initZ@Base 12
+ _D49TypeInfo_xS3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D4core10checkedint11__moduleRefZ@Base 12
+ _D4core10checkedint12__ModuleInfoZ@Base 12
+ _D4core2gc11gcinterface11__moduleRefZ@Base 12
+ _D4core2gc11gcinterface12__ModuleInfoZ@Base 12
+ _D4core2gc11gcinterface2GC11__InterfaceZ@Base 12
+ _D4core2gc11gcinterface4Root6__initZ@Base 12
+ _D4core2gc11gcinterface5Range11__xopEqualsMxFKxSQBuQBsQBsQBiZb@Base 12
+ _D4core2gc11gcinterface5Range6__initZ@Base 12
+ _D4core2gc11gcinterface5Range8opEqualsMxFNbMxSQBsQBqQBqQBgZb@Base 12
+ _D4core2gc11gcinterface5Range9__xtoHashFNbNeKxSQBtQBrQBrQBhZm@Base 12
+ _D4core2gc6config11PrettyBytes6__initZ@Base 12
+ _D4core2gc6config11__moduleRefZ@Base 12
+ _D4core2gc6config11prettyBytesFNaNbNiKmZa@Base 12
+ _D4core2gc6config12__ModuleInfoZ@Base 12
+ _D4core2gc6config18bytes2prettyStructFNaNbNimZSQBtQBrQBr11PrettyBytes@Base 12
+ _D4core2gc6config6Config10initializeMFNbNiZb@Base 12
+ _D4core2gc6config6Config11__xopEqualsMxFKxSQBpQBnQBnQBjZb@Base 12
+ _D4core2gc6config6Config4helpMFNbNiZv@Base 12
+ _D4core2gc6config6Config6__initZ@Base 12
+ _D4core2gc6config6Config9__xtoHashFNbNeKxSQBoQBmQBmQBiZm@Base 12
+ _D4core2gc6config6Config9errorNameMFNbNiZAya@Base 12
+ _D4core2gc6configQhSQsQpQo6Config@Base 12
+ _D4core2gc8registry11__moduleRefZ@Base 12
+ _D4core2gc8registry12__ModuleInfoZ@Base 12
+ _D4core2gc8registry16createGCInstanceFAyaZCQBpQBn11gcinterface2GC@Base 12
+ _D4core2gc8registry17registerGCFactoryFNbNiAyaPFZCQBwQBu11gcinterface2GCZv@Base 12
+ _D4core2gc8registry21registeredGCFactoriesFNbNiiZxASQByQBwQBw5Entry@Base 12
+ _D4core2gc8registry5Entry11__xopEqualsMxFKxSQBqQBoQBoQBiZb@Base 12
+ _D4core2gc8registry5Entry6__initZ@Base 12
+ _D4core2gc8registry5Entry9__xtoHashFNbNeKxSQBpQBnQBnQBhZm@Base 12
+ _D4core2gc8registry7entriesASQBbQzQy5Entry@Base 12
+ _D4core3sys5linux10perf_event11__moduleRefZ@Base 12
+ _D4core3sys5linux10perf_event12__ModuleInfoZ@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10exclude_hvMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10exclude_hvMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10namespacesMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10namespacesMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10precise_ipMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10precise_ipMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr11use_clockidMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr11use_clockidMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12__reserved_1MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12__reserved_1MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_hostMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_hostMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_idleMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_idleMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_userMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_userMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12inherit_statMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12inherit_statMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13exclude_guestMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13exclude_guestMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13sample_id_allMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13sample_id_allMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14context_switchMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14context_switchMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14enable_on_execMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14enable_on_execMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14exclude_kernelMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14exclude_kernelMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14write_backwardMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14write_backwardMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr22exclude_callchain_userMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr22exclude_callchain_userMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr24exclude_callchain_kernelMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr24exclude_callchain_kernelMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4commMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4commMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4freqMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4freqMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4mmapMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4mmapMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4taskMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4taskMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr5mmap2MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr5mmap2MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6__initZ@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6pinnedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6pinnedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr7inheritMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr7inheritMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr8disabledMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr8disabledMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9comm_execMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9comm_execMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9exclusiveMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9exclusiveMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9mmap_dataMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9mmap_dataMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9watermarkMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9watermarkMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_type6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry4typeMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry4typeMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5abortMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5abortMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5in_txMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5in_txMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6cyclesMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6cyclesMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry7mispredMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry7mispredMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry8reservedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry8reservedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry9predictedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry9predictedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_event_header6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_remoteMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_remoteMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_snoopxMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_snoopxMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src11mem_lvl_numMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src11mem_lvl_numMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6mem_opMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6mem_opMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src7mem_lvlMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src7mem_lvlMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_dtlbMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_dtlbMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_lockMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_lockMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_rsvdMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_rsvdMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src9mem_snoopMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src9mem_snoopMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_ns_link_info6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_ioc_flags6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page11cap_____resMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page11cap_____resMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page13cap_user_timeMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page13cap_user_timeMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page14cap_user_rdpmcMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page14cap_user_rdpmcMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page18cap_user_time_zeroMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page18cap_user_time_zeroMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page22cap_bit0_is_deprecatedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page22cap_bit0_is_deprecatedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page8cap_bit0MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page8cap_bit0MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event22perf_callchain_context6__initZ@Base 12
+ _D4core3sys5linux10perf_event22perf_event_read_format6__initZ@Base 12
+ _D4core3sys5linux10perf_event23perf_branch_sample_type6__initZ@Base 12
+ _D4core3sys5linux10perf_event24perf_event_sample_format6__initZ@Base 12
+ _D4core3sys5linux2fs11__moduleRefZ@Base 12
+ _D4core3sys5linux2fs12__ModuleInfoZ@Base 12
+ _D4core3sys5linux2fs12fstrim_range6__initZ@Base 12
+ _D4core3sys5linux2fs13inodes_stat_t6__initZ@Base 12
+ _D4core3sys5linux2fs16file_clone_range6__initZ@Base 12
+ _D4core3sys5linux2fs17file_dedupe_range6__initZ@Base 12
+ _D4core3sys5linux2fs17files_stat_struct6__initZ@Base 12
+ _D4core3sys5linux2fs22file_dedupe_range_info6__initZ@Base 12
+ _D4core3sys5linux2fs7fsxattr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Ehdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Move6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Nhdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Phdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Rela6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Shdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Ehdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Move6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Nhdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Phdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Rela6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Shdr6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab10_gt_header6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab9_gt_entry6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf_Options6__initZ@Base 12
+ _D4core3sys5linux3elf11__moduleRefZ@Base 12
+ _D4core3sys5linux3elf12Elf32_Verdef6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf32_auxv_t5_a_un6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf32_auxv_t6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_Verdef6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_auxv_t5_a_un6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_auxv_t6__initZ@Base 12
+ _D4core3sys5linux3elf12__ModuleInfoZ@Base 12
+ _D4core3sys5linux3elf13Elf32_RegInfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Syminfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Verdaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Vernaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Verneed6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Syminfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Verdaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Vernaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Verneed6__initZ@Base 12
+ _D4core3sys5linux3elf14Elf_Options_Hw6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Dyn5_d_un6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Dyn6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Lib6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Rel6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Sym6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Dyn5_d_un6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Dyn6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Lib6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Rel6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Sym6__initZ@Base 12
+ _D4core3sys5linux3err11__moduleRefZ@Base 12
+ _D4core3sys5linux3err12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4link11__moduleRefZ@Base 12
+ _D4core3sys5linux4link12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4link12dl_phdr_info6__initZ@Base 12
+ _D4core3sys5linux4link7r_debug6__initZ@Base 12
+ _D4core3sys5linux4link8link_map6__initZ@Base 12
+ _D4core3sys5linux4time11__moduleRefZ@Base 12
+ _D4core3sys5linux4time12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4tipc10tipc_event6__initZ@Base 12
+ _D4core3sys5linux4tipc11__moduleRefZ@Base 12
+ _D4core3sys5linux4tipc11tipc_portid6__initZ@Base 12
+ _D4core3sys5linux4tipc11tipc_subscr6__initZ@Base 12
+ _D4core3sys5linux4tipc12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc4Addr4Name6__initZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc4Addr6__initZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc6__initZ@Base 12
+ _D4core3sys5linux4tipc13tipc_name_seq6__initZ@Base 12
+ _D4core3sys5linux4tipc9tipc_name6__initZ@Base 12
+ _D4core3sys5linux5dlfcn10Dl_serinfo6__initZ@Base 12
+ _D4core3sys5linux5dlfcn10Dl_serpath6__initZ@Base 12
+ _D4core3sys5linux5dlfcn11__moduleRefZ@Base 12
+ _D4core3sys5linux5dlfcn12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5epoll11__moduleRefZ@Base 12
+ _D4core3sys5linux5epoll11epoll_event6__initZ@Base 12
+ _D4core3sys5linux5epoll12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5epoll12epoll_data_t6__initZ@Base 12
+ _D4core3sys5linux5errno11__moduleRefZ@Base 12
+ _D4core3sys5linux5errno12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5fcntl11__moduleRefZ@Base 12
+ _D4core3sys5linux5fcntl12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5sched11__moduleRefZ@Base 12
+ _D4core3sys5linux5sched12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5sched9cpu_set_t6__initZ@Base 12
+ _D4core3sys5linux5stdio11__moduleRefZ@Base 12
+ _D4core3sys5linux5stdio12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5stdio21cookie_io_functions_t6__initZ@Base 12
+ _D4core3sys5linux6config11__moduleRefZ@Base 12
+ _D4core3sys5linux6config12__ModuleInfoZ@Base 12
+ _D4core3sys5linux6string11__moduleRefZ@Base 12
+ _D4core3sys5linux6string12__ModuleInfoZ@Base 12
+ _D4core3sys5linux6unistd11__moduleRefZ@Base 12
+ _D4core3sys5linux6unistd12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7ifaddrs11__moduleRefZ@Base 12
+ _D4core3sys5linux7ifaddrs12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7ifaddrsQi6__initZ@Base 12
+ _D4core3sys5linux7netinet3in_11IN_BADCLASSFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_11__moduleRefZ@Base 12
+ _D4core3sys5linux7netinet3in_12IN_MULTICASTFNbNikZb@Base 12
+ _D4core3sys5linux7netinet3in_12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7netinet3in_15IN_EXPERIMENTALFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_18IN6_ARE_ADDR_EQUALFNaNbNiNfPSQCgQCe5posixQCdQBy8in6_addrQBdZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSAFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSBFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSCFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSDFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3tcp11__moduleRefZ@Base 12
+ _D4core3sys5linux7netinet3tcp12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7termios11__moduleRefZ@Base 12
+ _D4core3sys5linux7termios12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7timerfd11__moduleRefZ@Base 12
+ _D4core3sys5linux7timerfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8execinfo11__moduleRefZ@Base 12
+ _D4core3sys5linux8execinfo12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8io_uring11__moduleRefZ@Base 12
+ _D4core3sys5linux8io_uring12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8io_uring12io_uring_cqe6__initZ@Base 12
+ _D4core3sys5linux8io_uring12io_uring_sqe6__initZ@Base 12
+ _D4core3sys5linux8io_uring14io_uring_probe6__initZ@Base 12
+ _D4core3sys5linux8io_uring15io_uring_params6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_cqring_offsets6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_sqring_offsets6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_uring_probe_op6__initZ@Base 12
+ _D4core3sys5linux8io_uring20io_uring_restriction6__initZ@Base 12
+ _D4core3sys5linux8io_uring21io_uring_files_update6__initZ@Base 12
+ _D4core3sys5linux8io_uring22io_uring_getevents_arg6__initZ@Base 12
+ _D4core3sys5linuxQk4auxv11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4auxv12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4file11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4file12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4mman11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4mman12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4time10timerclearFNaNbNiNfPSQBtQBr5posixQCaQBr7timevalZv@Base 12
+ _D4core3sys5linuxQk4time10timerissetFNaNbNiNfPSQBtQBr5posixQCaQBr7timevalZi@Base 12
+ _D4core3sys5linuxQk4time11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4time12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4time8timeraddFNaNbNiNfxPSQBrQBp5posixQByQBp7timevalxQBdPSQCxQCvQBgQDbQCsQBdZv@Base 12
+ _D4core3sys5linuxQk4time8timersubFNaNbNiNfxPSQBrQBp5posixQByQBp7timevalxQBdPSQCxQCvQBgQDbQCsQBdZv@Base 12
+ _D4core3sys5linuxQk5prctl11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk5prctl12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk5prctl12prctl_mm_map6__initZ@Base 12
+ _D4core3sys5linuxQk5xattr11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk5xattr12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk6procfs11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk6procfs12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk6socket11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk6socket12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7eventfd11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7eventfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7inotify11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7inotify12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event6__initZ@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event8opAssignMFNaNbNcNiNjNeSQCmQCkQCjQCqQChQCcZQu@Base 12
+ _D4core3sys5linuxQk7sysinfo11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7sysinfo12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7sysinfo8sysinfo_6__initZ@Base 12
+ _D4core3sys5linuxQk8signalfd11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk8signalfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk8signalfd16signalfd_siginfo6__initZ@Base 12
+ _D4core3sys5posix3aio11__moduleRefZ@Base 12
+ _D4core3sys5posix3aio12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3aio5aiocb6__initZ@Base 12
+ _D4core3sys5posix3aio7aiocb646__initZ@Base 12
+ _D4core3sys5posix3grp11__moduleRefZ@Base 12
+ _D4core3sys5posix3grp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3grp5group6__initZ@Base 12
+ _D4core3sys5posix3net3if_11__moduleRefZ@Base 12
+ _D4core3sys5posix3net3if_12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3net3if_14if_nameindex_t6__initZ@Base 12
+ _D4core3sys5posix3pwd11__moduleRefZ@Base 12
+ _D4core3sys5posix3pwd12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3pwd6passwd6__initZ@Base 12
+ _D4core3sys5posix4arpa4inet11__moduleRefZ@Base 12
+ _D4core3sys5posix4arpa4inet12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4arpa4inet7in_addr6__initZ@Base 12
+ _D4core3sys5posix4poll11__moduleRefZ@Base 12
+ _D4core3sys5posix4poll12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4poll6pollfd6__initZ@Base 12
+ _D4core3sys5posix4stdc4time11__moduleRefZ@Base 12
+ _D4core3sys5posix4stdc4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4stdc4time2tm6__initZ@Base 12
+ _D4core3sys5posix4time10itimerspec6__initZ@Base 12
+ _D4core3sys5posix4time11__moduleRefZ@Base 12
+ _D4core3sys5posix4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5dlfcn11__moduleRefZ@Base 12
+ _D4core3sys5posix5dlfcn12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5dlfcn7Dl_info6__initZ@Base 12
+ _D4core3sys5posix5fcntl11__moduleRefZ@Base 12
+ _D4core3sys5posix5fcntl12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5fcntl5flock6__initZ@Base 12
+ _D4core3sys5posix5iconv11__moduleRefZ@Base 12
+ _D4core3sys5posix5iconv12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5netdb11__moduleRefZ@Base 12
+ _D4core3sys5posix5netdb12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5netdb6netent6__initZ@Base 12
+ _D4core3sys5posix5netdb7hostent6__initZ@Base 12
+ _D4core3sys5posix5netdb7hostent6h_addrMUNdZPa@Base 12
+ _D4core3sys5posix5netdb7servent6__initZ@Base 12
+ _D4core3sys5posix5netdb8addrinfo6__initZ@Base 12
+ _D4core3sys5posix5netdb8protoent6__initZ@Base 12
+ _D4core3sys5posix5sched11__moduleRefZ@Base 12
+ _D4core3sys5posix5sched11sched_param6__initZ@Base 12
+ _D4core3sys5posix5sched12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5spawn11__moduleRefZ@Base 12
+ _D4core3sys5posix5spawn12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5spawn17posix_spawnattr_t6__initZ@Base 12
+ _D4core3sys5posix5spawn26posix_spawn_file_actions_t6__initZ@Base 12
+ _D4core3sys5posix5stdio11__moduleRefZ@Base 12
+ _D4core3sys5posix5stdio12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5utime11__moduleRefZ@Base 12
+ _D4core3sys5posix5utime12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5utime7utimbuf6__initZ@Base 12
+ _D4core3sys5posix6config11__moduleRefZ@Base 12
+ _D4core3sys5posix6config12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6dirent11__moduleRefZ@Base 12
+ _D4core3sys5posix6dirent12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D4core3sys5posix6direntQh6__initZ@Base 12
+ _D4core3sys5posix6libgen11__moduleRefZ@Base 12
+ _D4core3sys5posix6libgen12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6locale11__moduleRefZ@Base 12
+ _D4core3sys5posix6locale12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6locale5lconv6__initZ@Base 12
+ _D4core3sys5posix6mqueue11__moduleRefZ@Base 12
+ _D4core3sys5posix6mqueue12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6mqueue7mq_attr6__initZ@Base 12
+ _D4core3sys5posix6setjmp11__moduleRefZ@Base 12
+ _D4core3sys5posix6setjmp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6setjmp13__jmp_buf_tag6__initZ@Base 12
+ _D4core3sys5posix6signal11__moduleRefZ@Base 12
+ _D4core3sys5posix6signal11sigaction_t6__initZ@Base 12
+ _D4core3sys5posix6signal12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6signal6sigval6__initZ@Base 12
+ _D4core3sys5posix6signal7stack_t6__initZ@Base 12
+ _D4core3sys5posix6signal8SIGRTMAXUNbNdNiZ3sigi@Base 12
+ _D4core3sys5posix6signal8SIGRTMINUNbNdNiZ3sigi@Base 12
+ _D4core3sys5posix6signal8sigevent6__initZ@Base 12
+ _D4core3sys5posix6signal8sigset_t6__initZ@Base 12
+ _D4core3sys5posix6signal8sigstack6__initZ@Base 12
+ _D4core3sys5posix6signal8timespec6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t10_sigpoll_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t11_sigchild_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t11_sigfault_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t5_rt_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t7_kill_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t8_timer_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t6si_pidMUNbNcNdNiNjZi@Base 12
+ _D4core3sys5posix6signal9siginfo_t6si_uidMUNbNcNdNiNjZk@Base 12
+ _D4core3sys5posix6signal9siginfo_t7si_addrMUNbNcNdNiNjZPv@Base 12
+ _D4core3sys5posix6signal9siginfo_t7si_bandMUNbNcNdNiNjZl@Base 12
+ _D4core3sys5posix6signal9siginfo_t8si_valueMUNbNcNdNiNjZSQCdQCbQCaQBx6sigval@Base 12
+ _D4core3sys5posix6signal9siginfo_t9si_statusMUNbNcNdNiNjZi@Base 12
+ _D4core3sys5posix6stdlib11__moduleRefZ@Base 12
+ _D4core3sys5posix6stdlib12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6string11__moduleRefZ@Base 12
+ _D4core3sys5posix6string12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6syslog11__moduleRefZ@Base 12
+ _D4core3sys5posix6syslog12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6unistd11__moduleRefZ@Base 12
+ _D4core3sys5posix6unistd12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7netinet3in_11__moduleRefZ@Base 12
+ _D4core3sys5posix7netinet3in_11sockaddr_in6__initZ@Base 12
+ _D4core3sys5posix7netinet3in_12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7netinet3in_12sockaddr_in66__initZ@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_LOOPBACKFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_V4COMPATFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_V4MAPPEDFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_LINKLOCALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_MC_GLOBALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_MULTICASTFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_SITELOCALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_23IN6_IS_ADDR_MC_ORGLOCALFNaNbNiPSQCjQChQCgQCdQBy8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_23IN6_IS_ADDR_UNSPECIFIEDFNaNbNiPSQCjQChQCgQCdQBy8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_LINKLOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_NODELOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_SITELOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_8in6_addr6__initZ@Base 12
+ _D4core3sys5posix7netinet3in_9ipv6_mreq6__initZ@Base 12
+ _D4core3sys5posix7netinet3tcp11__moduleRefZ@Base 12
+ _D4core3sys5posix7netinet3tcp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7pthread11__moduleRefZ@Base 12
+ _D4core3sys5posix7pthread12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup6__initZ@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup__T3popZQfMFNbiZv@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup__T4pushHTPUNaNbNiPvZvZQuMFNbNiQvQpZv@Base 12
+ _D4core3sys5posix7pthread23_pthread_cleanup_buffer6__initZ@Base 12
+ _D4core3sys5posix7strings11__moduleRefZ@Base 12
+ _D4core3sys5posix7strings12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7termios11__moduleRefZ@Base 12
+ _D4core3sys5posix7termios12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7termiosQi6__initZ@Base 12
+ _D4core3sys5posix8inttypes11__moduleRefZ@Base 12
+ _D4core3sys5posix8inttypes12__ModuleInfoZ@Base 12
+ _D4core3sys5posix8ucontext10mcontext_t6__initZ@Base 12
+ _D4core3sys5posix8ucontext10ucontext_t6__initZ@Base 12
+ _D4core3sys5posix8ucontext11__moduleRefZ@Base 12
+ _D4core3sys5posix8ucontext12__ModuleInfoZ@Base 12
+ _D4core3sys5posix8ucontext12_libc_fpxreg6__initZ@Base 12
+ _D4core3sys5posix8ucontext12_libc_xmmreg6__initZ@Base 12
+ _D4core3sys5posix8ucontext13_libc_fpstate6__initZ@Base 12
+ _D4core3sys5posix9semaphore11__moduleRefZ@Base 12
+ _D4core3sys5posix9semaphore12__ModuleInfoZ@Base 12
+ _D4core3sys5posix9semaphore17_pthread_fastlock6__initZ@Base 12
+ _D4core3sys5posix9semaphore5sem_t6__initZ@Base 12
+ _D4core3sys5posixQk2un11__moduleRefZ@Base 12
+ _D4core3sys5posixQk2un11sockaddr_un6__initZ@Base 12
+ _D4core3sys5posixQk2un12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3ipc11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3ipc12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3ipc8ipc_perm6__initZ@Base 12
+ _D4core3sys5posixQk3msg11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3msg12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3msg6msgbuf6__initZ@Base 12
+ _D4core3sys5posixQk3msg7msginfo6__initZ@Base 12
+ _D4core3sys5posixQk3msg8msqid_ds6__initZ@Base 12
+ _D4core3sys5posixQk3shm11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3shm12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3shm8shmid_ds6__initZ@Base 12
+ _D4core3sys5posixQk3uio11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3uio12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3uio5iovec6__initZ@Base 12
+ _D4core3sys5posixQk4mman11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4mman12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4stat11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4stat12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4stat6stat_t6__initZ@Base 12
+ _D4core3sys5posixQk4stat7S_ISBLKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISCHRFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISDIRFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISLNKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISREGFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISFIFOFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISSOCKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISTYPEFNbNikkZb@Base 12
+ _D4core3sys5posixQk4time11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4time7timeval6__initZ@Base 12
+ _D4core3sys5posixQk4time9itimerval6__initZ@Base 12
+ _D4core3sys5posixQk4wait10WIFSTOPPEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk4wait10__WTERMSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait11WEXITSTATUSFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait11WIFSIGNALEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk4wait11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4wait12WIFCONTINUEDFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4wait8WSTOPSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait8WTERMSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait9WIFEXITEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk5filio11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5filio12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5ioctl11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5ioctl12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5ioctl3_IOFNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl6termio6__initZ@Base 12
+ _D4core3sys5posixQk5ioctl7_IOC_NRFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl7winsize6__initZ@Base 12
+ _D4core3sys5posixQk5ioctl8_IOC_DIRFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl8termios26__initZ@Base 12
+ _D4core3sys5posixQk5ioctl9_IOC_SIZEFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl9_IOC_TYPEFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTPaZQjFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTPmZQjFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTiZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTkZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTmZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTnZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTPmZQjFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTkZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTmZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTPaZQjFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTiZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTkZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTmZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5types11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5types12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5types14pthread_attr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types14pthread_cond_t6__initZ@Base 12
+ _D4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D4core3sys5posixQk5types16pthread_rwlock_t6__initZ@Base 12
+ _D4core3sys5posixQk5types17_pthread_fastlock6__initZ@Base 12
+ _D4core3sys5posixQk5types17pthread_barrier_t6__initZ@Base 12
+ _D4core3sys5posixQk5types18pthread_condattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types19pthread_mutexattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types20pthread_rwlockattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types21pthread_barrierattr_t6__initZ@Base 12
+ _D4core3sys5posixQk6ioccom11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6ioccom12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6select11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6select12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6select6FD_CLRFNaNbNiiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select6FD_SETFNaNbNiiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select6fd_set6__initZ@Base 12
+ _D4core3sys5posixQk6select7FD_ZEROFNaNbNiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select7__FDELTFNaNbNiiZk@Base 12
+ _D4core3sys5posixQk6select8FD_ISSETFNaNbNiiPxSQBsQBqQBpQBwQBn6fd_setZb@Base 12
+ _D4core3sys5posixQk6select8__FDMASKFNaNbNiiZl@Base 12
+ _D4core3sys5posixQk6socket10CMSG_ALIGNFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket10CMSG_SPACEFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket11CMSG_NXTHDRFNaNbNiPNgSQBwQBuQBtQCaQBr6msghdrPNgSQCwQCuQCtQDaQCr7cmsghdrZQBc@Base 12
+ _D4core3sys5posixQk6socket11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6socket12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6socket13CMSG_FIRSTHDRFNaNbNiPNgSQByQBwQBvQCcQBt6msghdrZPNgSQCzQCxQCwQDdQCu7cmsghdr@Base 12
+ _D4core3sys5posixQk6socket16sockaddr_storage6__initZ@Base 12
+ _D4core3sys5posixQk6socket6linger6__initZ@Base 12
+ _D4core3sys5posixQk6socket6msghdr6__initZ@Base 12
+ _D4core3sys5posixQk6socket7cmsghdr6__initZ@Base 12
+ _D4core3sys5posixQk6socket8CMSG_LENFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket8sockaddr6__initZ@Base 12
+ _D4core3sys5posixQk6socket9CMSG_DATAFNaNbNiNkMPNgSQBwQBuQBtQCaQBr7cmsghdrZPNgh@Base 12
+ _D4core3sys5posixQk6ttycom11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6ttycom12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7statvfs11__moduleRefZ@Base 12
+ _D4core3sys5posixQk7statvfs12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7statvfs5FFlag6__initZ@Base 12
+ _D4core3sys5posixQk7statvfs9statvfs_t6__initZ@Base 12
+ _D4core3sys5posixQk7utsname11__moduleRefZ@Base 12
+ _D4core3sys5posixQk7utsname12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7utsnameQi6__initZ@Base 12
+ _D4core3sys5posixQk8resource11__moduleRefZ@Base 12
+ _D4core3sys5posixQk8resource12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk8resource6rlimit6__initZ@Base 12
+ _D4core3sys5posixQk8resource6rusage6__initZ@Base 12
+ _D4core4math11__moduleRefZ@Base 12
+ _D4core4math12__ModuleInfoZ@Base 12
+ _D4core4simd11__moduleRefZ@Base 12
+ _D4core4simd12__ModuleInfoZ@Base 12
+ _D4core4stdc4fenv11__moduleRefZ@Base 12
+ _D4core4stdc4fenv12__ModuleInfoZ@Base 12
+ _D4core4stdc4fenv6fenv_t6__initZ@Base 12
+ _D4core4stdc4math11__moduleRefZ@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math12__ModuleInfoZ@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNedZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNeeZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNefZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeffZi@Base 12
+ _D4core4stdc4time11__moduleRefZ@Base 12
+ _D4core4stdc4time12__ModuleInfoZ@Base 12
+ _D4core4stdc5ctype11__moduleRefZ@Base 12
+ _D4core4stdc5ctype12__ModuleInfoZ@Base 12
+ _D4core4stdc5errno11__moduleRefZ@Base 12
+ _D4core4stdc5errno12__ModuleInfoZ@Base 12
+ _D4core4stdc5stdio11__moduleRefZ@Base 12
+ _D4core4stdc5stdio12__ModuleInfoZ@Base 12
+ _D4core4stdc5stdio6fpos_t6__initZ@Base 12
+ _D4core4stdc5stdio8_IO_FILE6__initZ@Base 12
+ _D4core4stdc6config11__moduleRefZ@Base 12
+ _D4core4stdc6config12__ModuleInfoZ@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTdZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTdZQByZm@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTeZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTeZQByZm@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTfZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTfZQByZm@Base 12
+ _D4core4stdc6float_11__moduleRefZ@Base 12
+ _D4core4stdc6float_12__ModuleInfoZ@Base 12
+ _D4core4stdc6limits11__moduleRefZ@Base 12
+ _D4core4stdc6limits12__ModuleInfoZ@Base 12
+ _D4core4stdc6locale11__moduleRefZ@Base 12
+ _D4core4stdc6locale12__ModuleInfoZ@Base 12
+ _D4core4stdc6locale5lconv6__initZ@Base 12
+ _D4core4stdc6signal11__moduleRefZ@Base 12
+ _D4core4stdc6signal12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdarg11__moduleRefZ@Base 12
+ _D4core4stdc6stdarg12__ModuleInfoZ@Base 12
+ _D4core4stdc6stddef11__moduleRefZ@Base 12
+ _D4core4stdc6stddef12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdint11__moduleRefZ@Base 12
+ _D4core4stdc6stdint12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdint__T7_typifyTgZQlFNaNbNiNfgZg@Base 12
+ _D4core4stdc6stdint__T7_typifyThZQlFNaNbNiNfhZh@Base 12
+ _D4core4stdc6stdint__T7_typifyTiZQlFNaNbNiNfiZi@Base 12
+ _D4core4stdc6stdint__T7_typifyTkZQlFNaNbNiNfkZk@Base 12
+ _D4core4stdc6stdint__T7_typifyTlZQlFNaNbNiNflZl@Base 12
+ _D4core4stdc6stdint__T7_typifyTmZQlFNaNbNiNfmZm@Base 12
+ _D4core4stdc6stdint__T7_typifyTsZQlFNaNbNiNfsZs@Base 12
+ _D4core4stdc6stdint__T7_typifyTtZQlFNaNbNiNftZt@Base 12
+ _D4core4stdc6stdlib11__moduleRefZ@Base 12
+ _D4core4stdc6stdlib12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdlib5div_t6__initZ@Base 12
+ _D4core4stdc6stdlib6ldiv_t6__initZ@Base 12
+ _D4core4stdc6stdlib7lldiv_t6__initZ@Base 12
+ _D4core4stdc6string11__moduleRefZ@Base 12
+ _D4core4stdc6string12__ModuleInfoZ@Base 12
+ _D4core4stdc6tgmath11__moduleRefZ@Base 12
+ _D4core4stdc6tgmath12__ModuleInfoZ@Base 12
+ _D4core4stdc6wchar_11__moduleRefZ@Base 12
+ _D4core4stdc6wchar_12__ModuleInfoZ@Base 12
+ _D4core4stdc6wchar_8getwcharFNbNiNeZw@Base 12
+ _D4core4stdc6wchar_8putwcharFNbNiNewZw@Base 12
+ _D4core4stdc6wchar_9mbstate_t6__initZ@Base 12
+ _D4core4stdc6wchar_9mbstate_t8___value6__initZ@Base 12
+ _D4core4stdc6wctype11__moduleRefZ@Base 12
+ _D4core4stdc6wctype12__ModuleInfoZ@Base 12
+ _D4core4stdc7assert_11__moduleRefZ@Base 12
+ _D4core4stdc7assert_12__ModuleInfoZ@Base 12
+ _D4core4stdc7complex11__moduleRefZ@Base 12
+ _D4core4stdc7complex12__ModuleInfoZ@Base 12
+ _D4core4stdc8inttypes11__moduleRefZ@Base 12
+ _D4core4stdc8inttypes12__ModuleInfoZ@Base 12
+ _D4core4stdc8inttypes9imaxdiv_t6__initZ@Base 12
+ _D4core4sync5event11__moduleRefZ@Base 12
+ _D4core4sync5event12__ModuleInfoZ@Base 12
+ _D4core4sync5event5Event10initializeMFNbNibbZv@Base 12
+ _D4core4sync5event5Event14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core4sync5event5Event3setMFNbNiZv@Base 12
+ _D4core4sync5event5Event4waitMFNbNiSQBi4time8DurationZb@Base 12
+ _D4core4sync5event5Event4waitMFNbNiZb@Base 12
+ _D4core4sync5event5Event5resetMFNbNiZv@Base 12
+ _D4core4sync5event5Event6__ctorMFNbNcNibbZSQBpQBnQBlQBi@Base 12
+ _D4core4sync5event5Event6__dtorMFNbNiZv@Base 12
+ _D4core4sync5event5Event6__initZ@Base 12
+ _D4core4sync5event5Event9terminateMFNbNiZv@Base 12
+ _D4core4sync5mutex11__moduleRefZ@Base 12
+ _D4core4sync5mutex12__ModuleInfoZ@Base 12
+ _D4core4sync5mutex5Mutex10handleAddrMFZPSQBn3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy11__xopEqualsMxFKxSQCdQCbQBzQBwQBtZb@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy6__initZ@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy9__xtoHashFNbNeKxSQCcQCaQByQBvQBsZm@Base 12
+ _D4core4sync5mutex5Mutex4lockMFNeZv@Base 12
+ _D4core4sync5mutex5Mutex4lockMOFNeZv@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMFNbNiNeC6ObjectZCQBvQBtQBrQBo@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMFNbNiNeZCQBnQBlQBjQBg@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMOFNbNiNeC6ObjectZOCQBxQBvQBtQBq@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMOFNbNiNeZOCQBpQBnQBlQBi@Base 12
+ _D4core4sync5mutex5Mutex6__dtorMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex6__initZ@Base 12
+ _D4core4sync5mutex5Mutex6__vtblZ@Base 12
+ _D4core4sync5mutex5Mutex6unlockMFNeZv@Base 12
+ _D4core4sync5mutex5Mutex6unlockMOFNeZv@Base 12
+ _D4core4sync5mutex5Mutex7__ClassZ@Base 12
+ _D4core4sync5mutex5Mutex7tryLockMFNeZb@Base 12
+ _D4core4sync5mutex5Mutex7tryLockMOFNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T12lock_nothrowTCQBpQBnQBlQBiZQBdMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T12lock_nothrowTOCQBqQBoQBmQBjZQBeMOFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T14unlock_nothrowTCQBrQBpQBnQBkZQBfMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T14unlock_nothrowTOCQBsQBqQBoQBlZQBgMOFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T15tryLock_nothrowTCQBsQBqQBoQBlZQBgMFNbNiNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T15tryLock_nothrowTOCQBtQBrQBpQBmZQBhMOFNbNiNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTCQBiQBgQBeQBbZQwMFNbNiNeC6ObjectbZQBi@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTCQBiQBgQBeQBbZQwMFNbNiNebZQBa@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTOCQBjQBhQBfQBcZQxMOFNbNiNeC6ObjectbZOQBk@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTOCQBjQBhQBfQBcZQxMOFNbNiNebZOQBc@Base 12
+ _D4core4sync6config11__moduleRefZ@Base 12
+ _D4core4sync6config12__ModuleInfoZ@Base 12
+ _D4core4sync6config7mktspecFNbNiKSQBg3sys5posix6signal8timespecSQCk4time8DurationZv@Base 12
+ _D4core4sync6config7mktspecFNbNiKSQBg3sys5posix6signal8timespecZv@Base 12
+ _D4core4sync6config7mvtspecFNbNiKSQBg3sys5posix6signal8timespecSQCk4time8DurationZv@Base 12
+ _D4core4sync7barrier11__moduleRefZ@Base 12
+ _D4core4sync7barrier12__ModuleInfoZ@Base 12
+ _D4core4sync7barrier7Barrier4waitMFZv@Base 12
+ _D4core4sync7barrier7Barrier6__ctorMFkZCQBmQBkQBiQBd@Base 12
+ _D4core4sync7barrier7Barrier6__initZ@Base 12
+ _D4core4sync7barrier7Barrier6__vtblZ@Base 12
+ _D4core4sync7barrier7Barrier7__ClassZ@Base 12
+ _D4core4sync7rwmutex11__moduleRefZ@Base 12
+ _D4core4sync7rwmutex12__ModuleInfoZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy11__xopEqualsMxFKxSQCwQCuQCsQCnQCaQBwZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy9__xtoHashFNbNeKxSQCvQCtQCrQCmQBzQBvZm@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7__ClassZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMFNeSQCc4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMOFNeSQCd4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMOFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T17shouldQueueReaderTCQCnQClQCjQCeQBrZQBlMFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T17shouldQueueReaderTOCQCoQCmQCkQCfQBsZQBmMOFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T6__ctorTCQCbQBzQBxQBsQBfZQzMFNaNbNiNeZQBe@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T6__ctorTOCQCcQCaQByQBtQBgZQBaMOFNaNbNiNeZOQBh@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy11__xopEqualsMxFKxSQCwQCuQCsQCnQCaQBwZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy9__xtoHashFNbNeKxSQCvQCtQCrQCmQBzQBvZm@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7__ClassZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMFNeSQCc4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMOFNeSQCd4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMOFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T17shouldQueueWriterTCQCnQClQCjQCeQBrZQBlMFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T17shouldQueueWriterTOCQCoQCmQCkQCfQBsZQBmMOFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T6__ctorTCQCbQBzQBxQBsQBfZQzMFNaNbNiNeZQBe@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T6__ctorTOCQCcQCaQByQBtQBgZQBaMOFNaNbNiNeZOQBh@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__ctorMFNbNfEQBwQBuQBsQBn6PolicyZCQCrQCpQCnQCi@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__ctorMOFNbNfEQBxQBvQBtQBo6PolicyZOCQCtQCrQCpQCk@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6policyMFNbNdNfZEQBzQBxQBvQBq6Policy@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6policyMOFNbNdNfZEQCaQByQBwQBr6Policy@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6readerMFNbNdNfZCQBzQBxQBvQBq6Reader@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6readerMOFNbNdNfZOCQCbQBzQBxQBs6Reader@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6writerMFNbNdNfZCQBzQBxQBvQBq6Writer@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6writerMOFNbNdNfZOCQCbQBzQBxQBs6Writer@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex7__ClassZ@Base 12
+ _D4core4sync9condition11__moduleRefZ@Base 12
+ _D4core4sync9condition12__ModuleInfoZ@Base 12
+ _D4core4sync9condition9Condition13mutex_nothrowMFNaNbNdNiNfZCQChQCf5mutex5Mutex@Base 12
+ _D4core4sync9condition9Condition13mutex_nothrowMOFNaNbNdNiNfZOCQCjQCh5mutex5Mutex@Base 12
+ _D4core4sync9condition9Condition4waitMFSQBm4time8DurationZb@Base 12
+ _D4core4sync9condition9Condition4waitMFZv@Base 12
+ _D4core4sync9condition9Condition4waitMOFSQBn4time8DurationZb@Base 12
+ _D4core4sync9condition9Condition4waitMOFZv@Base 12
+ _D4core4sync9condition9Condition5mutexMFNdZCQBqQBoQs5Mutex@Base 12
+ _D4core4sync9condition9Condition5mutexMOFNdZOCQBsQBqQu5Mutex@Base 12
+ _D4core4sync9condition9Condition6__ctorMFNbNfCQBsQBq5mutex5MutexZCQCmQCkQCiQCb@Base 12
+ _D4core4sync9condition9Condition6__ctorMOFNbNfOCQBuQBs5mutex5MutexZOCQCpQCnQClQCe@Base 12
+ _D4core4sync9condition9Condition6__dtorMFZv@Base 12
+ _D4core4sync9condition9Condition6__initZ@Base 12
+ _D4core4sync9condition9Condition6__vtblZ@Base 12
+ _D4core4sync9condition9Condition6notifyMFZv@Base 12
+ _D4core4sync9condition9Condition6notifyMOFZv@Base 12
+ _D4core4sync9condition9Condition7__ClassZ@Base 12
+ _D4core4sync9condition9Condition9notifyAllMFZv@Base 12
+ _D4core4sync9condition9Condition9notifyAllMOFZv@Base 12
+ _D4core4sync9condition9Condition__T4waitTCQBoQBmQBkQBdZQuMFSQCg4time8DurationbZb@Base 12
+ _D4core4sync9condition9Condition__T4waitTCQBoQBmQBkQBdZQuMFbZv@Base 12
+ _D4core4sync9condition9Condition__T4waitTOCQBpQBnQBlQBeZQvMOFSQCi4time8DurationbZb@Base 12
+ _D4core4sync9condition9Condition__T4waitTOCQBpQBnQBlQBeZQvMOFbZv@Base 12
+ _D4core4sync9condition9Condition__T6__ctorTCQBqQBoQBmQBfTCQCeQCc5mutex5MutexZQBqMFNbNeQBdbZQBw@Base 12
+ _D4core4sync9condition9Condition__T6__ctorTOCQBrQBpQBnQBgTOCQCgQCe5mutex5MutexZQBsMOFNbNeOQBfbZOQCa@Base 12
+ _D4core4sync9condition9Condition__T6notifyTCQBqQBoQBmQBfZQwMFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T6notifyTOCQBrQBpQBnQBgZQxMOFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T9notifyAllTCQBtQBrQBpQBiZQzMFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T9notifyAllTOCQBuQBsQBqQBjZQBaMOFNbbZv@Base 12
+ _D4core4sync9exception11__moduleRefZ@Base 12
+ _D4core4sync9exception12__ModuleInfoZ@Base 12
+ _D4core4sync9exception9SyncError6__ctorMFNaNbNfAyaC6object9ThrowableQvmZCQCtQCrQCpQCi@Base 12
+ _D4core4sync9exception9SyncError6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCtQCrQCpQCi@Base 12
+ _D4core4sync9exception9SyncError6__initZ@Base 12
+ _D4core4sync9exception9SyncError6__vtblZ@Base 12
+ _D4core4sync9exception9SyncError7__ClassZ@Base 12
+ _D4core4sync9semaphore11__moduleRefZ@Base 12
+ _D4core4sync9semaphore12__ModuleInfoZ@Base 12
+ _D4core4sync9semaphore9Semaphore4waitMFSQBm4time8DurationZb@Base 12
+ _D4core4sync9semaphore9Semaphore4waitMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore6__ctorMFkZCQBqQBoQBmQBf@Base 12
+ _D4core4sync9semaphore9Semaphore6__dtorMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore6__initZ@Base 12
+ _D4core4sync9semaphore9Semaphore6__vtblZ@Base 12
+ _D4core4sync9semaphore9Semaphore6notifyMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore7__ClassZ@Base 12
+ _D4core4sync9semaphore9Semaphore7tryWaitMFZb@Base 12
+ _D4core4time11__moduleRefZ@Base 12
+ _D4core4time11_posixClockFNaNbNiNfEQBhQBf9ClockTypeZi@Base 12
+ _D4core4time12TickDuration11ticksPerSecyl@Base 12
+ _D4core4time12TickDuration14currSystemTickFNbNdNiNeZSQBzQBxQBv@Base 12
+ _D4core4time12TickDuration27_sharedStaticCtor_L2825_C14FNeZv@Base 12
+ _D4core4time12TickDuration3maxFNaNbNdNiNfZSQBpQBnQBl@Base 12
+ _D4core4time12TickDuration3minFNaNbNdNiNfZSQBpQBnQBl@Base 12
+ _D4core4time12TickDuration4zeroFNaNbNdNiNfZSQBqQBoQBm@Base 12
+ _D4core4time12TickDuration5msecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration5nsecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration5opCmpMxFNaNbNiNfSQBqQBoQBmZi@Base 12
+ _D4core4time12TickDuration5usecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration6__ctorMFNaNbNcNiNflZSQBuQBsQBq@Base 12
+ _D4core4time12TickDuration6__initZ@Base 12
+ _D4core4time12TickDuration6hnsecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration7secondsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration8__xopCmpMxFKxSQBnQBlQBjZi@Base 12
+ _D4core4time12TickDuration9appOriginySQBkQBiQBg@Base 12
+ _D4core4time12__ModuleInfoZ@Base 12
+ _D4core4time12nsecsToTicksFNaNbNiNflZl@Base 12
+ _D4core4time12ticksToNSecsFNaNbNiNflZl@Base 12
+ _D4core4time13TimeException6__ctorMFNaNbNfAyaC6object9ThrowableQvmZCQCoQCmQCk@Base 12
+ _D4core4time13TimeException6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCoQCmQCk@Base 12
+ _D4core4time13TimeException6__initZ@Base 12
+ _D4core4time13TimeException6__vtblZ@Base 12
+ _D4core4time13TimeException7__ClassZ@Base 12
+ _D4core4time13_clockTypeIdxFEQBbQz9ClockTypeZm@Base 12
+ _D4core4time13convClockFreqFNaNbNiNflllZl@Base 12
+ _D4core4time14_clockTypeNameFEQBcQBa9ClockTypeZAya@Base 12
+ _D4core4time15_ticksPerSecondyG8l@Base 12
+ _D4core4time25unitsAreInDescendingOrderFMAAyaZb@Base 12
+ _D4core4time3absFNaNbNiNfSQyQv12TickDurationZQu@Base 12
+ _D4core4time3absFNaNbNiNfSQyQv8DurationZQp@Base 12
+ _D4core4time4_absFNaNbNiNfdZd@Base 12
+ _D4core4time4_absFNaNbNiNflZl@Base 12
+ _D4core4time8Duration10isNegativeMxFNaNbNdNiNfZb@Base 12
+ _D4core4time8Duration3maxFNaNbNdNiNfZSQBkQBiQBg@Base 12
+ _D4core4time8Duration3minFNaNbNdNiNfZSQBkQBiQBg@Base 12
+ _D4core4time8Duration4zeroFNaNbNdNiNfZSQBlQBjQBh@Base 12
+ _D4core4time8Duration5opCmpMxFNaNbNiNfSQBlQBjQBhZi@Base 12
+ _D4core4time8Duration6__ctorMFNaNbNcNiNflZSQBpQBnQBl@Base 12
+ _D4core4time8Duration6__initZ@Base 12
+ _D4core4time8Duration8__xopCmpMxFKxSQBiQBgQBeZi@Base 12
+ _D4core4time8Duration8toStringMxFNaNbNfZAya@Base 12
+ _D4core4time8Duration__T10opOpAssignVAyaa1_2aZQwMFNaNbNcNiNflZSQCjQChQCf@Base 12
+ _D4core4time8Duration__T10opOpAssignVAyaa1_2bTSQBtQBrQBpZQBhMFNaNbNcNiNfxSQCuQCsQCqZQBm@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ12genSplitCallFNaNbNfZQCw@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ14genMemberDeclsFNaNbNfZQCy@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZSQDeQDcQDa__TQCuVQCra7_7365636f6e6473VQDma5_6e73656373ZQEkMxFNaNbNiNfZ10SplitUnits@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373Z__TQBvTlTlZQCdMxFNaNbNiNfJlJlZv@Base 12
+ _D4core4time8Duration__T5totalVAyaa5_6d73656373ZQyMxFNaNbNdNiNfZl@Base 12
+ _D4core4time8Duration__T5totalVAyaa7_7365636f6e6473ZQBcMxFNaNbNdNiNfZl@Base 12
+ _D4core4time8Duration__T8opBinaryVAyaa1_2bTSQBqQBoQBmZQBeMxFNaNbNiNfQzZQBc@Base 12
+ _D4core4time8Duration__T8opBinaryVAyaa1_2dTxSQBrQBpQBnZQBfMxFNaNbNiNfxQBaZSQCvQCtQCr@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ10appListSepFNaNbNfQBqkbZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ5unitsyAAa@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa4_64617973ZQBcFNaNbNfQCmlZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_686f757273ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_6d73656373ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_7573656373ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_7765656b73ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa6_686e73656373ZQBgFNaNbNfQCqlZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa7_6d696e75746573ZQBiFNaNbNfQCslZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa7_7365636f6e6473ZQBiFNaNbNfQCslZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFNaNbNfMQBaZv@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj14ticksPerSecondFNaNbNdNiNfZl@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj3maxFNaNbNdNiNfZSQCqQCo__TQCmVQCbi0ZQCw@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj3minFNaNbNdNiNfZSQCqQCo__TQCmVQCbi0ZQCw@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj4zeroFNaNbNdNiNfZSQCrQCp__TQCnVQCci0ZQCx@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj5opCmpMxFNaNbNiNfSQCrQCp__TQCnVQCci0ZQCxZi@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj5ticksMxFNaNbNdNiNfZl@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj6__initZ@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8__xopCmpMxFKxSQCoQCm__TQCkVQBzi0ZQCuZi@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8currTimeFNbNdNiNeZSQCtQCr__TQCpVQCei0ZQCz@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8toStringMxFNaNbNfZAya@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj__T8opBinaryVAyaa1_2dZQtMxFNaNbNiNfSQDjQDh__TQDfVQCui0ZQDpZSQEhQEf8Duration@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa4_64617973ZQBmFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_686f757273ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_6d73656373ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_7573656373ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_7765656b73ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa7_6d696e75746573ZQBsFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa7_7365636f6e6473ZQBsFNaNbNiNfKlZl@Base 12
+ _D4core4time__T2toVAyaa5_6d73656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa5_6e73656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa5_7573656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa6_686e73656373TlTxSQBoQBm12TickDurationZQBwFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa7_7365636f6e6473TlTxSQBqQBo12TickDurationZQByFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T3durVAyaa4_64617973ZQuFNaNbNiNflZSQBvQBt8Duration@Base 12
+ _D4core4time__T3durVAyaa5_686f757273ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_6d73656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_6e73656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_7573656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_7765656b73ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa6_686e73656373ZQyFNaNbNiNflZSQBzQBx8Duration@Base 12
+ _D4core4time__T3durVAyaa7_6d696e75746573ZQBaFNaNbNiNflZSQCcQCa8Duration@Base 12
+ _D4core4time__T3durVAyaa7_7365636f6e6473ZQBaFNaNbNiNflZSQCcQCa8Duration@Base 12
+ _D4core4time__T7convertVAyaa4_64617973VQpa6_686e73656373ZQBqFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_686f757273VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_6d73656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_6e73656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_7573656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_7765656b73VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta4_64617973ZQBqFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_686f757273ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_6d73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_6e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_7573656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_7765656b73ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta6_686e73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta7_6d696e75746573ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta7_7365636f6e6473ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_6d696e75746573VQva6_686e73656373ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_6d73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_6e73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_7573656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva6_686e73656373ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva7_7365636f6e6473ZQByFNaNbNiNflZl@Base 12
+ _D4core5bitop11__moduleRefZ@Base 12
+ _D4core5bitop12__ModuleInfoZ@Base 12
+ _D4core5bitop2btFNaNbNiMxPmmZi@Base 12
+ _D4core5bitop3bsfFNaNbNiNfkZi@Base 12
+ _D4core5bitop3bsfFNaNbNiNfmZi@Base 12
+ _D4core5bitop3bsrFNaNbNiNfkZi@Base 12
+ _D4core5bitop3bsrFNaNbNiNfmZi@Base 12
+ _D4core5bitop6popcntFNaNbNiNfkZi@Base 12
+ _D4core5bitop6popcntFNaNbNiNfmZi@Base 12
+ _D4core5bitop7Split646__ctorMFNaNbNcNiNfmZSQBpQBnQBk@Base 12
+ _D4core5bitop7Split646__initZ@Base 12
+ _D4core5bitop7bitswapFNaNbNiNfkZk@Base 12
+ _D4core5bitop7bitswapFNaNbNiNfmZm@Base 12
+ _D4core5bitop8BitRange5emptyMxFNaNbNiNfZb@Base 12
+ _D4core5bitop8BitRange5frontMFNaNbNiNfZm@Base 12
+ _D4core5bitop8BitRange6__ctorMFNaNbNcNiPxmmZSQBrQBpQBm@Base 12
+ _D4core5bitop8BitRange6__initZ@Base 12
+ _D4core5bitop8BitRange8popFrontMFNaNbNiZv@Base 12
+ _D4core5bitop8byteswapFNaNbNiNftZt@Base 12
+ _D4core5bitop__T10softPopcntTkZQpFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T10softPopcntTmZQpFNaNbNiNfmZi@Base 12
+ _D4core5bitop__T11softBitswapTkZQqFNaNbNiNfkZk@Base 12
+ _D4core5bitop__T11softBitswapTmZQqFNaNbNiNfmZm@Base 12
+ _D4core5bitop__T8softScanTkVbi0ZQqFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T8softScanTkVbi1ZQqFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T8softScanTmVbi0ZQqFNaNbNiNfmZi@Base 12
+ _D4core5bitop__T8softScanTmVbi1ZQqFNaNbNiNfmZi@Base 12
+ _D4core5cpuid10_hasPopcntyb@Base 12
+ _D4core5cpuid10_hasRdrandyb@Base 12
+ _D4core5cpuid10_hasRdseedyb@Base 12
+ _D4core5cpuid10_isItaniumyb@Base 12
+ _D4core5cpuid10_processoryAa@Base 12
+ _D4core5cpuid10_x87onChipyb@Base 12
+ _D4core5cpuid10dataCachesFNaNbNdNiNeZxG5SQBnQBl9CacheInfo@Base 12
+ _D4core5cpuid11CpuFeatures11__xopEqualsMxFKxSQBrQBpQBmZb@Base 12
+ _D4core5cpuid11CpuFeatures6__initZ@Base 12
+ _D4core5cpuid11CpuFeatures9__xtoHashFNbNeKxSQBqQBoQBlZm@Base 12
+ _D4core5cpuid11__moduleRefZ@Base 12
+ _D4core5cpuid11_dataCachesyG5SQBcQBa9CacheInfo@Base 12
+ _D4core5cpuid11amd3dnowExtFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid11cacheLevelsFNbNdNiNeZk@Base 12
+ _D4core5cpuid11coresPerCPUFNaNbNdNiNeZk@Base 12
+ _D4core5cpuid11cpuFeaturesSQzQw11CpuFeatures@Base 12
+ _D4core5cpuid11hasLahfSahfFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12__ModuleInfoZ@Base 12
+ _D4core5cpuid12_amd3dnowExtyb@Base 12
+ _D4core5cpuid12_coresPerCPUyk@Base 12
+ _D4core5cpuid12_hasLahfSahfyb@Base 12
+ _D4core5cpuid12getCpuInfo0BFNbNiNeZv@Base 12
+ _D4core5cpuid12hasCmpxchg8bFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12hasPclmulqdqFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12preferAthlonFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13_hasCmpxchg8byb@Base 12
+ _D4core5cpuid13_hasPclmulqdqyb@Base 12
+ _D4core5cpuid13_preferAthlonyb@Base 12
+ _D4core5cpuid13hasCmpxchg16bFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13hasVpclmulqdqFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13threadsPerCPUFNaNbNdNiNeZk@Base 12
+ _D4core5cpuid14_hasCmpxchg16byb@Base 12
+ _D4core5cpuid14_hasVpclmulqdqyb@Base 12
+ _D4core5cpuid14_threadsPerCPUyk@Base 12
+ _D4core5cpuid14getCpuFeaturesFNbNiNeZPSQBlQBj11CpuFeatures@Base 12
+ _D4core5cpuid14hyperThreadingFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid14numCacheLevelsk@Base 12
+ _D4core5cpuid14preferPentium1FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid14preferPentium4FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid15_hyperThreadingyb@Base 12
+ _D4core5cpuid15_preferPentium1yb@Base 12
+ _D4core5cpuid15_preferPentium4yb@Base 12
+ _D4core5cpuid15getAMDcacheinfoFNbNiNeZ8assocmapyAh@Base 12
+ _D4core5cpuid15getAMDcacheinfoFNbNiNeZv@Base 12
+ _D4core5cpuid16has3dnowPrefetchFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid17_has3dnowPrefetchyb@Base 12
+ _D4core5cpuid17hyperThreadingBitFNbNdNiNeZb@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ3idsyG63h@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ4waysyG63h@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ5sizesyG63k@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZv@Base 12
+ _D4core5cpuid18getcacheinfoCPUID4FNbNiNeZv@Base 12
+ _D4core5cpuid18hasSysEnterSysExitFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid18max_extended_cpuidk@Base 12
+ _D4core5cpuid19_hasSysEnterSysExityb@Base 12
+ _D4core5cpuid26_sharedStaticCtor_L1068_C1FNbNiNeZv@Base 12
+ _D4core5cpuid3aesFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3avxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3fmaFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3hleFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3mmxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3rtmFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3sseFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4_aesyb@Base 12
+ _D4core5cpuid4_avxyb@Base 12
+ _D4core5cpuid4_fmayb@Base 12
+ _D4core5cpuid4_hleyb@Base 12
+ _D4core5cpuid4_mmxyb@Base 12
+ _D4core5cpuid4_rtmyb@Base 12
+ _D4core5cpuid4_sseyb@Base 12
+ _D4core5cpuid4avx2FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4sse2FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4sse3FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4vaesFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5_avx2yb@Base 12
+ _D4core5cpuid5_sse2yb@Base 12
+ _D4core5cpuid5_sse3yb@Base 12
+ _D4core5cpuid5_vaesyb@Base 12
+ _D4core5cpuid5fp16cFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5modelk@Base 12
+ _D4core5cpuid5sse41FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5sse42FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5sse4aFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5ssse3FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6_fp16cyb@Base 12
+ _D4core5cpuid6_sse41yb@Base 12
+ _D4core5cpuid6_sse42yb@Base 12
+ _D4core5cpuid6_sse4ayb@Base 12
+ _D4core5cpuid6_ssse3yb@Base 12
+ _D4core5cpuid6amdMmxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6familyk@Base 12
+ _D4core5cpuid6hasShaFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6vendorFNaNbNdNiNeZAya@Base 12
+ _D4core5cpuid7_amdMmxyb@Base 12
+ _D4core5cpuid7_hasShayb@Base 12
+ _D4core5cpuid7_vendoryAa@Base 12
+ _D4core5cpuid7hasCmovFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid7hasFxsrFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8_hasCmovyb@Base 12
+ _D4core5cpuid8_hasFxsryb@Base 12
+ _D4core5cpuid8amd3dnowFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8cpuidX86FNbNiNeZv@Base 12
+ _D4core5cpuid8hasCPUIDFNbNiNeZb@Base 12
+ _D4core5cpuid8hasLzcntFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8hasRdtscFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8isX86_64FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8steppingk@Base 12
+ _D4core5cpuid9CacheInfo6__initZ@Base 12
+ _D4core5cpuid9_amd3dnowyb@Base 12
+ _D4core5cpuid9_hasLzcntyb@Base 12
+ _D4core5cpuid9_hasRdtscyb@Base 12
+ _D4core5cpuid9_isX86_64yb@Base 12
+ _D4core5cpuid9datacacheG5SQyQv9CacheInfo@Base 12
+ _D4core5cpuid9hasPopcntFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9hasRdrandFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9hasRdseedFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9isItaniumFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9max_cpuidk@Base 12
+ _D4core5cpuid9processorFNaNbNdNiNeZAya@Base 12
+ _D4core5cpuid9x87onChipFNaNbNdNiNeZb@Base 12
+ _D4core6atomic11__moduleRefZ@Base 12
+ _D4core6atomic12__ModuleInfoZ@Base 12
+ _D4core6atomic5pauseFNaNbNiNfZv@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKOxPSQCcQCcQBvZQCp@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKxPOxSQCdQCdQBwZQCq@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TmZQBmFNaNbNiNeKOxmZm@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TmZQBmFNaNbNiNeKxmZm@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNeNkMKOxPSQBpQBpQBjZQCc@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNeNkMKxPOxSQBqQBqQBkZQCd@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKOxPSQCcQCcQBvZQCp@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKxPOxSQCdQCdQBwZQCq@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TbZQBmFNaNbNiNeKOxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TbZQBmFNaNbNiNeKxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TiZQBmFNaNbNiNeKOxiZi@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TiZQBmFNaNbNiNeKxiZi@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TkZQBmFNaNbNiNeKOxkZk@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TkZQBmFNaNbNiNeKxkZk@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TbTbZQBpFNaNbNiNeKObbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TbTbZQBpFNaNbNiNeKbbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TmTiZQBpFNaNbNiNeKOmiZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TmTiZQBpFNaNbNiNeKmiZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorTQyZQCmFNaNbNiNeKOPQBoQBtZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorTQyZQCmFNaNbNiNeKQBoQBrZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONTQBlZQDaFNaNbNiNeKOPQCcQChZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONTQBlZQDaFNaNbNiNeKQCcQCfZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TmTmZQBpFNaNbNiNeKOmmZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TmTmZQBpFNaNbNiNeKmmZv@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKOkmZk@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKkmZk@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKOmmZm@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKmmZm@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKOkmZk@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKkmZk@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKOmmZm@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKmmZm@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOhZQBgFNaNbNiNfPOhZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOkZQBgFNaNbNiNfPOkZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOmZQBgFNaNbNiNfPOmZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOtZQBgFNaNbNiNfPOtZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedThZQBfFNaNbNiNfPhZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTkZQBfFNaNbNiNfPkZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTmZQBfFNaNbNiNfPmZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTtZQBfFNaNbNiNfPtZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTOkZQBiFNaNbNiNeKOkZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTOmZQBiFNaNbNiNeKOmZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTkZQBhFNaNbNiNeKkZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTmZQBhFNaNbNiNeKmZb@Base 12
+ _D4core6atomic__T3casZ__TQiThThThZQrFNaNbNiNePOhhhZb@Base 12
+ _D4core6atomic__T3casZ__TQiThThThZQrFNaNbNiNePhhhZb@Base 12
+ _D4core6atomic__T3casZ__TQiTmTmTmZQrFNaNbNiNePOmmmZb@Base 12
+ _D4core6atomic__T3casZ__TQiTmTmTmZQrFNaNbNiNePmmmZb@Base 12
+ _D4core6atomic__T3casZ__TQiTtTtTtZQrFNaNbNiNePOtttZb@Base 12
+ _D4core6atomic__T3casZ__TQiTtTtTtZQrFNaNbNiNePtttZb@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTkTiZQzFNaNbNiNfKOkiZk@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTmTiZQzFNaNbNiNfKOmiZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTmTmZQzFNaNbNiNfKOmmZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTkTiZQzFNaNbNiNfKOkiZk@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTmTiZQzFNaNbNiNfKOmiZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTmTmZQzFNaNbNiNfKOmmZm@Base 12
+ _D4core6int12811__moduleRefZ@Base 12
+ _D4core6int12812__ModuleInfoZ@Base 12
+ _D4core6int1282geFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282gtFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282leFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282ltFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282orFNaNbNiNfSQzQw4CentQkZQn@Base 12
+ _D4core6int1283addFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283andFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283comFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283decFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283divFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283incFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283mulFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283negFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283rolFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283rorFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283sarFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283shlFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283shrFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283subFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283tstFNaNbNiNfSQBaQy4CentZb@Base 12
+ _D4core6int1283ugeFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283ugtFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283uleFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283ultFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283xorFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1284Cent6__initZ@Base 12
+ _D4core6int1284rol1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284ror1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284sar1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284shl1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284shr1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284udivFNaNbNiNfSQBbQz4CentQlZQo@Base 12
+ _D4core6int1286divmodFNaNbNiNfSQBdQBb4CentQmJQpZQs@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZ13udivmod128_64FNaNbNiNfQBqmJmZm@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZ13udivmod128_64FQBimJmZ9udiv96_64FNaNbNiNfmkmZk@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZQs@Base 12
+ _D4core6memory10initialize@Base 12
+ _D4core6memory11__moduleRefZ@Base 12
+ _D4core6memory12__ModuleInfoZ@Base 12
+ _D4core6memory2GC12ProfileStats6__initZ@Base 12
+ _D4core6memory2GC12profileStatsFNbNiNfZSQBmQBkQBg12ProfileStats@Base 12
+ _D4core6memory2GC5Stats6__initZ@Base 12
+ _D4core6memory2GC5queryFNaNbNkMPvZSQBhQBf8BlkInfo_@Base 12
+ _D4core6memory2GC5queryFNbNkMxPvZSQBgQBe8BlkInfo_@Base 12
+ _D4core6memory2GC5statsFNbNiNfZSQBeQBcQy5Stats@Base 12
+ _D4core6memory2GC6__initZ@Base 12
+ _D4core6memory2GC6addrOfFNaNbNiNePNgvZQf@Base 12
+ _D4core6memory2GC6addrOfFNaNbNiNePvZQd@Base 12
+ _D4core6memory2GC6sizeOfFNaNbNiPvZm@Base 12
+ _D4core6memory2GC6sizeOfFNbNiMxPvZm@Base 12
+ _D4core6memory2GC7clrAttrFNaNbPvkZk@Base 12
+ _D4core6memory2GC7clrAttrFNbMxPvkZk@Base 12
+ _D4core6memory2GC7getAttrFNaNbPvZk@Base 12
+ _D4core6memory2GC7getAttrFNbMxPvZk@Base 12
+ _D4core6memory2GC7setAttrFNaNbPvkZk@Base 12
+ _D4core6memory2GC7setAttrFNbMxPvkZk@Base 12
+ _D4core6memory8BlkInfo_6__initZ@Base 12
+ _D4core6memory8pageSizeym@Base 12
+ _D4core6stdcpp11string_view11__moduleRefZ@Base 12
+ _D4core6stdcpp11string_view12__ModuleInfoZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTaZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTuZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTwZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTaTSQBzQBxQBt__T11char_traitsTaZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTuTSQBzQBxQBt__T11char_traitsTuZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTwTSQBzQBxQBt__T11char_traitsTwZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11type_traits11__moduleRefZ@Base 12
+ _D4core6stdcpp11type_traits12__ModuleInfoZ@Base 12
+ _D4core6stdcpp11type_traits__T17integral_constantTbVbi0ZQBa6__initZ@Base 12
+ _D4core6stdcpp11type_traits__T17integral_constantTbVbi1ZQBa6__initZ@Base 12
+ _D4core6stdcpp4new_11__moduleRefZ@Base 12
+ _D4core6stdcpp4new_11align_val_t6__initZ@Base 12
+ _D4core6stdcpp4new_11std_nothrowySQBgQBeQBa9nothrow_t@Base 12
+ _D4core6stdcpp4new_12__ModuleInfoZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc6__initZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc6__vtblZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc7__ClassZ@Base 12
+ _D4core6stdcpp4new_9nothrow_t6__initZ@Base 12
+ _D4core6stdcpp5array11__moduleRefZ@Base 12
+ _D4core6stdcpp5array12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6memory11__moduleRefZ@Base 12
+ _D4core6stdcpp6memory12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6string11__moduleRefZ@Base 12
+ _D4core6stdcpp6string12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6vector11__moduleRefZ@Base 12
+ _D4core6stdcpp6vector12__ModuleInfoZ@Base 12
+ _D4core6stdcpp7utility11__moduleRefZ@Base 12
+ _D4core6stdcpp7utility12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__ctorMFNbNiZCQBwQBuQBqQBk@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__initZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid7__ClassZ@Base 12
+ _D4core6stdcpp8typeinfo11__moduleRefZ@Base 12
+ _D4core6stdcpp8typeinfo12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__ctorMFNbNiZCQBtQBrQBnQBh@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__initZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast7__ClassZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__ctorMFNiPxaZCQBvQBtQBpQBj@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__initZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info7__ClassZ@Base 12
+ _D4core6stdcpp8xutility11__moduleRefZ@Base 12
+ _D4core6stdcpp8xutility12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8xutility14CppStdRevision6__initZ@Base 12
+ _D4core6stdcpp9allocator11__moduleRefZ@Base 12
+ _D4core6stdcpp9allocator12__ModuleInfoZ@Base 12
+ _D4core6stdcpp9exception11__moduleRefZ@Base 12
+ _D4core6stdcpp9exception12__ModuleInfoZ@Base 12
+ _D4core6stdcpp9exception13bad_exception6__ctorMFNbNiPxaZCQCdQCbQBxQBq@Base 12
+ _D4core6stdcpp9exception13bad_exception6__initZ@Base 12
+ _D4core6stdcpp9exception13bad_exception6__vtblZ@Base 12
+ _D4core6stdcpp9exception13bad_exception7__ClassZ@Base 12
+ _D4core6stdcpp9exceptionQk6__ctorMFNbNiPxaiZCQBrQBpQBlQBo@Base 12
+ _D4core6stdcpp9exceptionQk6__ctorMFNbNiZCQBnQBlQBhQBk@Base 12
+ _D4core6stdcpp9exceptionQk6__initZ@Base 12
+ _D4core6stdcpp9exceptionQk6__vtblZ@Base 12
+ _D4core6stdcpp9exceptionQk7__ClassZ@Base 12
+ _D4core6thread10threadbase10ThreadBase10popContextMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase10topContextMFNbNiZPSQCfQCd7context12StackContext@Base 12
+ _D4core6thread10threadbase10ThreadBase11pushContextMFNbNiPSQCfQCd7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase12isMainThreadMFNbNdNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase13nAboutToStartm@Base 12
+ _D4core6thread10threadbase10ThreadBase13pAboutToStartPCQCbQBzQBvQBm@Base 12
+ _D4core6thread10threadbase10ThreadBase13tlsGCdataInitMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase15initDataStorageMFNbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase18criticalRegionLockFNbNdNiZCQCn4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase10ThreadBase18destroyDataStorageMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase18destructBeforeDtorMFNbNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase19_criticalRegionLockG72v@Base 12
+ _D4core6thread10threadbase10ThreadBase25destroyDataStorageIfAvailMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase2idMFNdNiNfZm@Base 12
+ _D4core6thread10threadbase10ThreadBase3addFNbNiCQBuQBsQBoQBfbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase3addFNbNiPSQBvQBt7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase3runMFZv@Base 12
+ _D4core6thread10threadbase10ThreadBase4nameMFNdNiNfAyaZv@Base 12
+ _D4core6thread10threadbase10ThreadBase4nameMFNdNiNfZAya@Base 12
+ _D4core6thread10threadbase10ThreadBase5slockFNbNdNiZCQBz4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase10ThreadBase5yieldFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfDFZvmZCQCiQCgQCcQBt@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfPFZvmZCQCiQCgQCcQBt@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfmZCQCeQCcQByQBp@Base 12
+ _D4core6thread10threadbase10ThreadBase6__initZ@Base 12
+ _D4core6thread10threadbase10ThreadBase6__vtblZ@Base 12
+ _D4core6thread10threadbase10ThreadBase6_slockG72v@Base 12
+ _D4core6thread10threadbase10ThreadBase6getAllFZ6resizeFNaNbNfKACQCkQCiQCeQBvmZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6getAllFZACQBvQBtQBpQBg@Base 12
+ _D4core6thread10threadbase10ThreadBase6removeFNbNiCQBxQBvQBrQBiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6removeFNbNiPSQByQBw7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7__ClassZ@Base 12
+ _D4core6thread10threadbase10ThreadBase7getThisFNbNiNfZCQCbQBzQBvQBm@Base 12
+ _D4core6thread10threadbase10ThreadBase7opApplyFMDFKCQByQBwQBsQBjZiZ6resizeFNbNiKAQBemZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7opApplyFMDFKCQByQBwQBsQBjZiZi@Base 12
+ _D4core6thread10threadbase10ThreadBase7setThisFNbNiCQByQBwQBsQBjZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_cbegPSQBuQBs7context12StackContext@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_mainCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_tbegCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_thisCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_tlenm@Base 12
+ _D4core6thread10threadbase10ThreadBase8isDaemonMFNdNiNfZb@Base 12
+ _D4core6thread10threadbase10ThreadBase8isDaemonMFNdNiNfbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase9initLocksFNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase9isRunningMFNbNdNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase9termLocksFNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase__T10getAllImplS_DQCcQCaQBwQBn6getAllFZ6resizeFNaNbNfKACQDoQDmQDiQCzmZvZQCrFZQx@Base 12
+ _D4core6thread10threadbase10ThreadBase__T10getAllImplS_DQCcQCaQBwQBn7opApplyFMDFKCQDcQDaQCwQCnZiZ6resizeFNbNiKAQBemZvZQCzFNiZQp@Base 12
+ _D4core6thread10threadbase11ThreadError6__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDcQDaQCwQCn@Base 12
+ _D4core6thread10threadbase11ThreadError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDcQDaQCwQCn@Base 12
+ _D4core6thread10threadbase11ThreadError6__initZ@Base 12
+ _D4core6thread10threadbase11ThreadError6__vtblZ@Base 12
+ _D4core6thread10threadbase11ThreadError7__ClassZ@Base 12
+ _D4core6thread10threadbase11__moduleRefZ@Base 12
+ _D4core6thread10threadbase11ll_nThreadsm@Base 12
+ _D4core6thread10threadbase11ll_pThreadsPSQBnQBl5types13ll_ThreadData@Base 12
+ _D4core6thread10threadbase12__ModuleInfoZ@Base 12
+ _D4core6thread10threadbase12lowlevelLockFNbNdNiZCQBv4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase12suspendDepthk@Base 12
+ _D4core6thread10threadbase13onThreadErrorFNbNiAyaZ5errorCQCdQCbQBx11ThreadError@Base 12
+ _D4core6thread10threadbase13onThreadErrorFNbNiAyaZv@Base 12
+ _D4core6thread10threadbase15ThreadException6__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDgQDeQDaQCr@Base 12
+ _D4core6thread10threadbase15ThreadException6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDgQDeQDaQCr@Base 12
+ _D4core6thread10threadbase15ThreadException6__initZ@Base 12
+ _D4core6thread10threadbase15ThreadException6__vtblZ@Base 12
+ _D4core6thread10threadbase15ThreadException7__ClassZ@Base 12
+ _D4core6thread10threadbase15ll_removeThreadFNbNimZv@Base 12
+ _D4core6thread10threadbase15scanAllTypeImplFNbMDFNbEQByQBwQBs8ScanTypePvQcZvQgZv@Base 12
+ _D4core6thread10threadbase17multiThreadedFlagb@Base 12
+ _D4core6thread10threadbase17thread_findByAddrFmZCQBvQBtQBp10ThreadBase@Base 12
+ _D4core6thread10threadbase18findLowLevelThreadFNbNimZb@Base 12
+ _D4core6thread10threadbase19initLowlevelThreadsFNiZv@Base 12
+ _D4core6thread10threadbase19termLowlevelThreadsFNiZv@Base 12
+ _D4core6thread10threadbase25_sharedStaticDtor_L948_C1FZv@Base 12
+ _D4core6thread10threadbase7ll_lockG72v@Base 12
+ _D4core6thread10threadbase__T15thread_term_tplTCQBuQBs8osthread6ThreadTG177vZQBwFNiKQnZv@Base 12
+ _D4core6thread10threadbase__T21thread_attachThis_tplTCQCaQBy8osthread6ThreadZQBwFNbZQBf@Base 12
+ _D4core6thread11__moduleRefZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup3addMFCQBtQBr8osthread6ThreadZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6__initZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6__vtblZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6createMFDFZvZCQCbQBz8osthread6Thread@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6createMFPFZvZCQCbQBz8osthread6Thread@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6removeMFCQBwQBu8osthread6ThreadZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7__ClassZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7joinAllMFbZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7opApplyMFMDFKCQCbQBz8osthread6ThreadZiZi@Base 12
+ _D4core6thread11threadgroup11__moduleRefZ@Base 12
+ _D4core6thread11threadgroup12__ModuleInfoZ@Base 12
+ _D4core6thread12__ModuleInfoZ@Base 12
+ _D4core6thread5fiber11__moduleRefZ@Base 12
+ _D4core6thread5fiber12__ModuleInfoZ@Base 12
+ _D4core6thread5fiber5Fiber10allocStackMFNbmmZv@Base 12
+ _D4core6thread5fiber5Fiber13yieldAndThrowFNbNiC6object9ThrowableZv@Base 12
+ _D4core6thread5fiber5Fiber19_staticCtor_L924_C9FZv@Base 12
+ _D4core6thread5fiber5Fiber3runMFZv@Base 12
+ _D4core6thread5fiber5Fiber4callMFEQBgQBeQBaQx7RethrowZC6object9Throwable@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiDFZvZv@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiPFZvZv@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber5stateMxFNaNbNdNiNfZEQBtQBrQBnQBk5State@Base 12
+ _D4core6thread5fiber5Fiber5yieldFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber6__ctorMFNbDFZvmmZCQBrQBpQBlQBi@Base 12
+ _D4core6thread5fiber5Fiber6__ctorMFNbPFZvmmZCQBrQBpQBlQBi@Base 12
+ _D4core6thread5fiber5Fiber6__dtorMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber6__initZ@Base 12
+ _D4core6thread5fiber5Fiber6__vtblZ@Base 12
+ _D4core6thread5fiber5Fiber7__ClassZ@Base 12
+ _D4core6thread5fiber5Fiber7getThisFNbNiNfZCQBpQBnQBjQBg@Base 12
+ _D4core6thread5fiber5Fiber7setThisFNbNiCQBmQBkQBgQBdZv@Base 12
+ _D4core6thread5fiber5Fiber7sm_thisCQBhQBfQBbQy@Base 12
+ _D4core6thread5fiber5Fiber7sm_utxtSQBh3sys5posix8ucontext10ucontext_t@Base 12
+ _D4core6thread5fiber5Fiber8callImplMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber8switchInMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9freeStackMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9initStackMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9switchOutMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber__T4callVEQBiQBgQBcQz7Rethrowi0ZQBdMFNbNiZC6object9Throwable@Base 12
+ _D4core6thread5fiber5Fiber__T4callVEQBiQBgQBcQz7Rethrowi1ZQBdMFNiZC6object9Throwable@Base 12
+ _D4core6thread5types11__moduleRefZ@Base 12
+ _D4core6thread5types12__ModuleInfoZ@Base 12
+ _D4core6thread5types13ll_ThreadData6__initZ@Base 12
+ _D4core6thread5types17PTHREAD_STACK_MINym@Base 12
+ _D4core6thread5types24_sharedStaticCtor_L54_C1FZv@Base 12
+ _D4core6thread5types8PAGESIZEym@Base 12
+ _D4core6thread7context11__moduleRefZ@Base 12
+ _D4core6thread7context12StackContext6__initZ@Base 12
+ _D4core6thread7context12__ModuleInfoZ@Base 12
+ _D4core6thread7context8Callable6__initZ@Base 12
+ _D4core6thread7context8Callable6opCallMFZv@Base 12
+ _D4core6thread7context8Callable8opAssignMFNaNbNiNfDFZvZv@Base 12
+ _D4core6thread7context8Callable8opAssignMFNaNbNiNfPFZvZv@Base 12
+ _D4core6thread8osthread11__moduleRefZ@Base 12
+ _D4core6thread8osthread11getStackTopFNbNiZPv@Base 12
+ _D4core6thread8osthread11swapContextFNbNiPvZQd@Base 12
+ _D4core6thread8osthread12__ModuleInfoZ@Base 12
+ _D4core6thread8osthread12attachThreadFNbNiCQBpQBn10threadbase10ThreadBaseZQBg@Base 12
+ _D4core6thread8osthread12suspendCountSQBk3sys5posix9semaphore5sem_t@Base 12
+ _D4core6thread8osthread12thread_yieldFNbNiZv@Base 12
+ _D4core6thread8osthread14getStackBottomFNbNiZPv@Base 12
+ _D4core6thread8osthread15adjustStackSizeFNbNimZm@Base 12
+ _D4core6thread8osthread16_mainThreadStoreG177v@Base 12
+ _D4core6thread8osthread17thread_entryPointUNbPvZ21thread_cleanupHandlerUNaNbNiQBhZv@Base 12
+ _D4core6thread8osthread18callWithStackShellFNbMDFNbPvZvZv@Base 12
+ _D4core6thread8osthread18joinLowLevelThreadFNbNimZv@Base 12
+ _D4core6thread8osthread18resumeSignalNumberi@Base 12
+ _D4core6thread8osthread19suspendSignalNumberi@Base 12
+ _D4core6thread8osthread20createLowLevelThreadFNbNiDFNbZvkQhZ20thread_lowlevelEntryUNbPvZQd@Base 12
+ _D4core6thread8osthread20createLowLevelThreadFNbNiDFNbZvkQhZm@Base 12
+ _D4core6thread8osthread6Thread12PRIORITY_MAXFNaNbNdNiNeZxi@Base 12
+ _D4core6thread8osthread6Thread12PRIORITY_MINFNaNbNdNiNeZi@Base 12
+ _D4core6thread8osthread6Thread14loadPrioritiesFNbNiNeZSQCbQBzQBvQBp8Priority@Base 12
+ _D4core6thread8osthread6Thread16PRIORITY_DEFAULTFNaNbNdNiNeZi@Base 12
+ _D4core6thread8osthread6Thread3runMFZv@Base 12
+ _D4core6thread8osthread6Thread4joinMFbZC6object9Throwable@Base 12
+ _D4core6thread8osthread6Thread5cacheOSQBkQBiQBeQy8Priority@Base 12
+ _D4core6thread8osthread6Thread5sleepFNbNiSQBo4time8DurationZv@Base 12
+ _D4core6thread8osthread6Thread5startMFNbZCQBoQBmQBiQBc@Base 12
+ _D4core6thread8osthread6Thread5yieldFNbNiZv@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfDFZvmZCQCaQByQBuQBo@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfPFZvmZCQCaQByQBuQBo@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfmZCQBwQBuQBqQBk@Base 12
+ _D4core6thread8osthread6Thread6__dtorMFNbNiZv@Base 12
+ _D4core6thread8osthread6Thread6__initZ@Base 12
+ _D4core6thread8osthread6Thread6__vtblZ@Base 12
+ _D4core6thread8osthread6Thread7__ClassZ@Base 12
+ _D4core6thread8osthread6Thread7getThisFNbNiNfZCQBtQBrQBnQBh@Base 12
+ _D4core6thread8osthread6Thread8Priority6__initZ@Base 12
+ _D4core6thread8osthread6Thread8priorityMFNdZi@Base 12
+ _D4core6thread8osthread6Thread8priorityMFNdiZv@Base 12
+ _D4core6thread8osthread6Thread9isRunningMFNbNdNiZb@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa12_5052494f524954595f4d4158ZQBtFNbNiNfZi@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa12_5052494f524954595f4d494eZQBtFNbNiNfZi@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa16_5052494f524954595f44454641554c54ZQCbFNbNiNfZi@Base 12
+ _D4core6thread8osthread6resumeFNbNiCQBiQBg10threadbase10ThreadBaseZv@Base 12
+ _D4core6thread8osthread7suspendFNbNiCQBjQBhQBd6ThreadZb@Base 12
+ _D4core6thread8osthread8toThreadFNaNbNiNeNkMCQBrQBp10threadbase10ThreadBaseZCQCxQCvQCr6Thread@Base 12
+ _D4core6vararg11__moduleRefZ@Base 12
+ _D4core6vararg12__ModuleInfoZ@Base 12
+ _D4core7runtime11__moduleRefZ@Base 12
+ _D4core7runtime12__ModuleInfoZ@Base 12
+ _D4core7runtime14UnitTestResult6__initZ@Base 12
+ _D4core7runtime18runModuleUnitTestsUZ19unittestSegvHandlerUiPSQCi3sys5posix6signal9siginfo_tPvZv@Base 12
+ _D4core7runtime19defaultTraceHandlerFPvZC6object9Throwable9TraceInfo@Base 12
+ _D4core7runtime25_sharedStaticCtor_L119_C1FZv@Base 12
+ _D4core7runtime5CArgs6__initZ@Base 12
+ _D4core7runtime7Runtime10initializeFZb@Base 12
+ _D4core7runtime7Runtime16moduleUnitTesterFNdPFZbZv@Base 12
+ _D4core7runtime7Runtime16moduleUnitTesterFNdZPFZb@Base 12
+ _D4core7runtime7Runtime19sm_moduleUnitTesterPFZb@Base 12
+ _D4core7runtime7Runtime22sm_extModuleUnitTesterPFZSQBxQBv14UnitTestResult@Base 12
+ _D4core7runtime7Runtime24extendedModuleUnitTesterFNdPFZSQCcQCa14UnitTestResultZv@Base 12
+ _D4core7runtime7Runtime24extendedModuleUnitTesterFNdZPFZSQCdQCb14UnitTestResult@Base 12
+ _D4core7runtime7Runtime6__initZ@Base 12
+ _D4core7runtime7Runtime9terminateFZb@Base 12
+ _D4core8builtins11__ctfeWriteFNaNbNiNfMAxaZv@Base 12
+ _D4core8builtins11__moduleRefZ@Base 12
+ _D4core8builtins12__ModuleInfoZ@Base 12
+ _D4core8demangle11__moduleRefZ@Base 12
+ _D4core8demangle12__ModuleInfoZ@Base 12
+ _D4core8demangle12demangleTypeFNaNbNfAxaAaZQd@Base 12
+ _D4core8demangle15decodeDmdStringFNaNbNfAxaKmZAya@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks10parseLNameMFNaNlNfMKSQDeQDc__T8DemangleTSQDyQDwQDqFNaNbNfNkMQDcZQDcZQBmZb@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11__xopEqualsMxFKxSQDaQCyQCsFNaNbNfNkMQCeZQCeZb@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks13encodeBackrefMFNaNbNlNfmZv@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks13flushPositionMFNaNbNlNfKSQDiQDg__T8DemangleTSQEcQEaQDuFNaNbNfNkMQDgZQDgZQBmZv@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks16positionInResultMFNaNbNiNlNfmZm@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks9__xtoHashFNbNeKxSQCzQCxQCrFNaNbNfNkMQCdZQCdZm@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks9parseTypeMFNaNjNfKSQDbQCz__T8DemangleTSQDvQDtQDnFNaNbNfNkMQCzZQCzZQBmAaZQd@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZAa@Base 12
+ _D4core8demangle7NoHooks6__initZ@Base 12
+ _D4core8demangleQjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFMDFyPS6object10ModuleInfoZiZiTQBfZQByFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbMDFNbPvZvZvTQpZQBhFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiAyaMDFNbNiQkZQnbZQrTQzZQBrFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiAyakQeQgmZvTQrZQBjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiCQBm6thread10threadbase10ThreadBaseZQBkTQBtZQCmFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiCQBm6thread10threadbase10ThreadBaseZvTQBrZQCkFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiPvZQdTQlZQBdFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiPvZvTQkZQBcFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZPvTQjZQBbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZmTQiZQBaFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZvTQiZQBaFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbPvMDFNbQhQjZvZvTQtZQBlFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbPvMDFNbQhZiZvTQrZQBjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEgQEe__TQDyTQDuZQEgFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQEfQEd__TQDxTQDtZQEfFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDpQDn__TQDhTQDdZQDpFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDoQDm__TQDgTQDcZQDoFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEaQDy__TQDsTQDoZQEaFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDzQDx__TQDrTQDnZQDzFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDrQDp__TQDjTQDfZQDrFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDqQDo__TQDiTQDeZQDqFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEuQEs__TQEmTQEiZQEuFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQEtQEr__TQElTQEhZQEtFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEsQEq__TQEkTQEgZQEsFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQErQEp__TQEjTQEfZQErFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDlQDj__TQDdTQCzZQDlFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDkQDi__TQDcTQCyZQDkFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDkQDi__TQDcTQCyZQDkFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDjQDh__TQDbTQCxZQDjFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDjQDh__TQDbTQCxZQDjFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDhQDf__TQCzTQCvZQDhFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDhQDf__TQCzTQCvZQDhFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDuQDs__TQDmTQDiZQDuFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDtQDr__TQDlTQDhZQDtFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDrQDp__TQDjTQDfZQDrFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDqQDo__TQDiTQDeZQDqFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10isHexDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10parseLNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10parseValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11__xopEqualsMxFKxSQDyQDw__TQDqTQDkZQDyZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11peekBackrefMFNaNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11sliceNumberMFNaNjNfZQBs@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12decodeNumberMFNaNlNfMQBtZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12decodeNumberMFNaNlNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12demangleNameMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12demangleTypeMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl13parseFuncAttrMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl13parseModifierMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__ctorMFNaNbNiNfAyaZCQErQEp__TQEjTQEdZQErQCg@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl15parseSymbolNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16isCallConventionFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16parseMangledNameMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16parseMangledNameMFNaNlNfbmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__ctorMFNaNbNiNfAyaZCQEuQEs__TQEmTQEgZQEuQCj@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17isSymbolNameFrontMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseIntegerValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseTemplateArgsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseTypeFunctionMFNaNjNfAaEQEjQEh__TQEbTQDvZQEj10IsDelegateZQBk@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl18parseFuncArgumentsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl18parseQualifiedNameMFNaNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19mayBeMangledNameArgMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19parseCallConventionMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19parseMangledNameArgMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25mayBeTemplateInstanceNameMFNaNlNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25parseFunctionTypeNoReturnMFNaNjNfbZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25parseTemplateInstanceNameMFNaNlNfbZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3eatMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3padMFNaNfQBgZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3putMFNaNjNfMQBjZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3putMFNaNjNfaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl4peekMFNaNbNiNfmZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl4testMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5emptyMFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5errorFNaNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5frontMFNaNbNdNiNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5matchMFNaNfQBiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5matchMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5shiftMFNaNiNfQBkZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__ctorMFNaNbNcNiNfNkMQBsEQEfQEd__TQDxTQDrZQEf7AddTypeNkMAaZSQFoQFm__TQFgTQFaZQFo@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__ctorMFNaNbNcNiNfNkMQBsNkMAaZSQElQEj__TQEdTQDxZQEl@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6appendMFNaNjNfQBlZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6removeMFNaNbNiNfQBnZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6silentMFNaNfDFNaNfZvZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7isAlphaFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7isDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8containsFNaNbNiNeQBoQBrZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8overflowFNaNiNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8popFrontMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8popFrontMFNaNfiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8putAsHexMFNaNfmiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8putCommaMFNaNfmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9__xtoHashFNbNeKxSQDxQDv__TQDpTQDjZQDxZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9ascii2hexFNaNfaZh@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9copyInputMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseRealMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseTypeMFNaNjNfAaZ10primitivesyG23Aa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseTypeMFNaNjNfAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T10doDemangleSQDvQDt__TQDnTQDhZQDv16parseMangledNameZQCaMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T10doDemangleS_DQDxQDv__TQDpTQDjZQDx9parseTypeMFNaNjNfAaZQdZQChMFNaNbNjNfZQu@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T13decodeBackrefVii0ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T13decodeBackrefVmi1ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10isHexDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10parseLNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10parseValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11__xopEqualsMxFKxSQCnQCl__TQCfTQBzZQCnZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11peekBackrefMFNaNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11sliceNumberMFNaNjNfZAxa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12decodeNumberMFNaNlNfMAxaZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12decodeNumberMFNaNlNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12demangleNameMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12demangleTypeMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa13parseFuncAttrMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa13parseModifierMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__ctorMFNaNbNiNfAyaZCQDgQDe__TQCyTQCsZQDgQCg@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa15parseSymbolNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16isCallConventionFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16parseMangledNameMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16parseMangledNameMFNaNlNfbmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__ctorMFNaNbNiNfAyaZCQDjQDh__TQDbTQCvZQDjQCj@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17isSymbolNameFrontMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseIntegerValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseTemplateArgsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseTypeFunctionMFNaNjNfAaEQCyQCw__TQCqTQCkZQCy10IsDelegateZQBk@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa18parseFuncArgumentsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa18parseQualifiedNameMFNaNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19mayBeMangledNameArgMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19parseCallConventionMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19parseMangledNameArgMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25mayBeTemplateInstanceNameMFNaNlNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25parseFunctionTypeNoReturnMFNaNjNfbZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25parseTemplateInstanceNameMFNaNlNfbZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3eatMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3padMFNaNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3putMFNaNjNfMAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3putMFNaNjNfaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa4peekMFNaNbNiNfmZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa4testMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5emptyMFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5errorFNaNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5frontMFNaNbNdNiNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5matchMFNaNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5matchMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5shiftMFNaNiNfAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__ctorMFNaNbNcNiNfNkMAxaEQCuQCs__TQCmTQCgZQCu7AddTypeNkMAaZSQEdQEb__TQDvTQDpZQEd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__ctorMFNaNbNcNiNfNkMAxaNkMAaZSQDaQCy__TQCsTQCmZQDa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6appendMFNaNjNfAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6removeMFNaNbNiNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6silentMFNaNfDFNaNfZvZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7isAlphaFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7isDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8containsFNaNbNiNeAxaQdZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8overflowFNaNiNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8popFrontMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8popFrontMFNaNfiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8putAsHexMFNaNfmiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8putCommaMFNaNfmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9__xtoHashFNbNeKxSQCmQCk__TQCeTQByZQCmZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9ascii2hexFNaNfaZh@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9copyInputMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseRealMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseTypeMFNaNjNfAaZ10primitivesyG23Aa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseTypeMFNaNjNfAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T10doDemangleSQCkQCi__TQCcTQBwZQCk16parseMangledNameZQCaMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T10doDemangleS_DQCmQCk__TQCeTQByZQCm9parseTypeMFNaNjNfAaZQdZQChMFNaNbNjNfZQu@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T13decodeBackrefVii0ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T13decodeBackrefVmi1ZQuMFNaNfZm@Base 12
+ _D4core8internal10attributes11__moduleRefZ@Base 12
+ _D4core8internal10attributes12__ModuleInfoZ@Base 12
+ _D4core8internal10entrypoint11__moduleRefZ@Base 12
+ _D4core8internal10entrypoint12__ModuleInfoZ@Base 12
+ _D4core8internal11destruction11__moduleRefZ@Base 12
+ _D4core8internal11destruction12__ModuleInfoZ@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3gcc8sections3elf9ThreadDSOZQBvFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBx2gc11gcinterface4RootZQBsFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBx2gc11gcinterface5RangeZQBtFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDlFNaNbNiNfKQDgZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDnFNaNbNiNfKQDiZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDlFNaNbNiNfKQDgZv@Base 12
+ _D4core8internal12parseoptions10parseErrorFNbNiMxAaMxQeMxQiAxaZb@Base 12
+ _D4core8internal12parseoptions11__moduleRefZ@Base 12
+ _D4core8internal12parseoptions12__ModuleInfoZ@Base 12
+ _D4core8internal12parseoptions15overflowedErrorFNbNiMxAaMxQeZb@Base 12
+ _D4core8internal12parseoptions3minFNbNimmZm@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKQfQlZb@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKbQkZb@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKfQkZb@Base 12
+ _D4core8internal12parseoptions6MemVal6__initZ@Base 12
+ _D4core8internal12parseoptions8optErrorFNbNiMxAaMxQeAxaZb@Base 12
+ _D4core8internal12parseoptions__T12parseOptionsTSQBv2gc6config6ConfigZQBlFNbNiKQBfAyaZb@Base 12
+ _D4core8internal12parseoptions__T14rt_parseOptionTbZQtFNbNiAxaKANgaKbQkZb@Base 12
+ _D4core8internal12parseoptions__T17initConfigOptionsTSQCa2gc6config6ConfigZQBqFNbNiKQBfAyaZb@Base 12
+ _D4core8internal12parseoptions__T4skipX7isspaceZQpFNaNbNiNfANgaZQf@Base 12
+ _D4core8internal12parseoptions__T5parseHThZQkFNbNiAxaKANgaKhQkbZb@Base 12
+ _D4core8internal12parseoptions__T5parseHTkZQkFNbNiAxaKANgaKkQkbZb@Base 12
+ _D4core8internal12parseoptions__T5parseHTmZQkFNbNiAxaKANgaKmQkbZb@Base 12
+ _D4core8internal2gc2os10isLowOnMemFNbNimZb@Base 12
+ _D4core8internal2gc2os10os_mem_mapFNbNimbZPv@Base 12
+ _D4core8internal2gc2os11__moduleRefZ@Base 12
+ _D4core8internal2gc2os12__ModuleInfoZ@Base 12
+ _D4core8internal2gc2os12os_mem_unmapFNbNiPvmZi@Base 12
+ _D4core8internal2gc2os15os_physical_memFNbNiZm@Base 12
+ _D4core8internal2gc2os8wait_pidFNbNiibZEQBmQBkQBeQBe11ChildStatus@Base 12
+ _D4core8internal2gc4bits11__moduleRefZ@Base 12
+ _D4core8internal2gc4bits12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4bits6GCBits10clearWordsMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits10copyRangeZMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits16copyWordsShiftedMFNbNimmmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits18copyRangeRepeatingMFNbNimmPxmmZv@Base 12
+ _D4core8internal2gc4bits6GCBits3setMFNaNbNiNlNemZi@Base 12
+ _D4core8internal2gc4bits6GCBits4DtorMFNbNibZv@Base 12
+ _D4core8internal2gc4bits6GCBits4copyMFNbNiPSQBqQBoQBiQBiQBgZv@Base 12
+ _D4core8internal2gc4bits6GCBits4testMxFNaNbNiNlNemZm@Base 12
+ _D4core8internal2gc4bits6GCBits4zeroMFNbNiZv@Base 12
+ _D4core8internal2gc4bits6GCBits5allocMFNbNimbZv@Base 12
+ _D4core8internal2gc4bits6GCBits5clearMFNaNbNiNlNemZi@Base 12
+ _D4core8internal2gc4bits6GCBits6__initZ@Base 12
+ _D4core8internal2gc4bits6GCBits6nwordsMxFNaNbNdNiZm@Base 12
+ _D4core8internal2gc4bits6GCBits6setAllMFNbNiZv@Base 12
+ _D4core8internal2gc4bits6GCBits8clrRangeMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits8setRangeMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits8setWordsMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9clrRangeZMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9copyRangeMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9copyWordsMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9setLockedMFNaNbNiNlNemZm@Base 12
+ _D4core8internal2gc4bits6GCBits9setRangeZMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw10baseOffsetFNbNimEQCfQCdQBxQBxQBvQCg4BinsZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw10extendTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw10initializeFZCQCbQBq11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl12conservativeQw10mallocTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw10numExtendsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw10numMallocsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw11calcBinBaseFZG15G256s@Base 12
+ _D4core8internal2gc4impl12conservativeQw11numReallocsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw11reallocTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10initializeFNbPSQCrQCpQCjQCjQChQCs3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10log_mallocFNbPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10log_parentFNbPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector11log_collectFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector8log_freeFNbNiPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw12maxPauseTimeSQCb4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw12sentinel_addFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw12sentinel_subFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw13maxPoolMemorym@Base 12
+ _D4core8internal2gc4impl12conservativeQw13sentinel_initFNbNiPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw13sentinel_sizeFNbNixPvmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC10freeNoSyncMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11checkNoSyncMFNbPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11fullCollectMFNbZ2goFNbPSQDcQDaQCuQCuQCsQDd3GcxZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11fullCollectMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11queryNoSyncMFNbPvZSQCx6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12_inFinalizerb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12addrOfNoSyncMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12extendNoSyncMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12mallocNoSyncMFNbmkKmxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12profileStatsMFNbNiNeZSQDa6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12sizeOfNoSyncMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13reallocNoSyncMFNbPvmKkKmxC8TypeInfoZQt@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13reserveNoSyncMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZ2goFNbPSQDiQDgQDaQDaQCyQDj3GcxMxQBjZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC14getStatsNoSyncMFNbNiNeJSQDc6memory2GC5StatsZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC18fullCollectNoStackMFNbZ2goFNbPSQDjQDhQDbQDbQCzQDk3GcxZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC18fullCollectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4filePa@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4linem@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5checkMFNbPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5queryMFNbPvZSQCq6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5statsMFNbNiNfZSQCs6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__ctorMFZCQCnQClQCfQCfQCdQCoQBt@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__dtorMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZ2goFNaNbNiNfPSQDaQCyQCsQCsQCqQDb3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6gcLockOSQClQCj8spinlock15AlignedSpinLock@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6lockNRFNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkMxC8TypeInfoZSQDd6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZ2goFNbPSQDaQCyQCsQCsQCqQDb3GcxQBikZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZ2goFNaNbNiNfPSQDbQCzQCtQCtQCrQDc3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZ2goFNbPSQCzQCxQCrQCrQCpQDa3GcxQBhZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZ2goFNbPSQDaQCyQCsQCsQCqQDb3GcxQBikZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZ2goFNbPSQCyQCwQCqQCqQCoQCz3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8rootIterMFNdNiZDFMDFNbKSQDbQCq11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC9isPreciseb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC9rangeIterMFNdNiZDFMDFNbKSQDcQCr11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy10freeNoSyncMFNbNiPvZvS_DQEmQEkQEeQEeQEcQEn8freeTimelS_DQFrQFpQFjQFjQFhQFs8numFreeslTQCpZQEtMFNbNiKQDdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11checkNoSyncMFNbPvZvS_DQElQEjQEdQEdQEbQEm9otherTimelS_DQFrQFpQFjQFjQFhQFs9numOtherslTQCrZQEuMFNbKQDdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11fullCollectMFNbZ2goFNbPSQEnQElQEfQEfQEdQEo3GcxZmTQBbZQDlMFNbKQBnZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11queryNoSyncMFNbPvZSQEi6memory8BlkInfo_S_DQFeQFcQEwQEwQEuQFf9otherTimelS_DQGkQGiQGcQGcQGaQGl9numOtherslTQDkZQFnMFNbKQDwZQDx@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12addrOfNoSyncMFNbNiPvZQdS_DQEpQEnQEhQEhQEfQEq9otherTimelS_DQFvQFtQFnQFnQFlQFw9numOtherslTQCsZQEyMFNbNiKQDgZQDk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12extendNoSyncMFNbPvmmxC8TypeInfoZmS_DQEzQExQErQErQEpQFa10extendTimelS_DQGhQGfQFzQFzQFxQGi10numExtendslTQDiTmTmTxQDmZQFvMFNbKQEdKmKmKxQEhZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12mallocNoSyncMFNbmkKmxC8TypeInfoZPvS_DQFaQEyQEsQEsQEqQFb10mallocTimelS_DQGiQGgQGaQGaQFyQGj10numMallocslTmTkTmTxQDlZQFuMFNbKmKkKmKxQEeZQDx@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12sizeOfNoSyncMFNbNiPvZmS_DQEoQEmQEgQEgQEeQEp9otherTimelS_DQFuQFsQFmQFmQFkQFv9numOtherslTQCrZQExMFNbNiKQDfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13reallocNoSyncMFNbPvmKkKmxC8TypeInfoZQtS_DQFeQFcQEwQEwQEuQFf10mallocTimelS_DQGmQGkQGeQGeQGcQGn10numMallocslTQDmTmTkTmTxQDpZQGcMFNbKQEjKmKkKmKxQEmZQEy@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13reserveNoSyncMFNbmZmS_DQEmQEkQEeQEeQEcQEn9otherTimelS_DQFsQFqQFkQFkQFiQFt9numOtherslTmZQEtMFNbKmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13runFinalizersMFNbMxAvZ2goFNbPSQEtQErQElQElQEjQEu3GcxMxQBjZvS_DQFzQFxQFrQFrQFpQGa9otherTimelS_DQHfQHdQGxQGxQGvQHg9numOtherslTQDsTxQEgZQGnMFNbKQEjKxQExZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy14getStatsNoSyncMFNbNiNeJSQEn6memory2GC5StatsZvS_DQFlQFjQFdQFdQFbQFm9otherTimelS_DQGrQGpQGjQGjQGhQGs9numOtherslTQDjZQFuMFNbNiNfKQDzZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy18fullCollectNoStackMFNbZ2goFNbPSQEuQEsQEmQEmQEkQEv3GcxZmTQBbZQDsMFNbKQBnZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy6enableMFZ2goFNaNbNiNfPSQElQEjQEdQEdQEbQEm3GcxZvS_DQFmQFkQFeQFeQFcQFn9otherTimelS_DQGsQGqQGkQGkQGiQGt9numOtherslTQDnZQFvMFNbNiNfKQEdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7clrAttrMFNbPvkZ2goFNbPSQElQEjQEdQEdQEbQEm3GcxQBikZkS_DQFqQFoQFiQFiQFgQFr9otherTimelS_DQGwQGuQGoQGoQGmQGx9numOtherslTQDrTQEfTkZQGfMFNbKQEjKQExKkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7disableMFZ2goFNaNbNiNfPSQEmQEkQEeQEeQEcQEn3GcxZvS_DQFnQFlQFfQFfQFdQFo9otherTimelS_DQGtQGrQGlQGlQGjQGu9numOtherslTQDnZQFwMFNbNiNfKQEdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7getAttrMFNbPvZ2goFNbPSQEkQEiQEcQEcQEaQEl3GcxQBhZkS_DQFoQFmQFgQFgQFeQFp9otherTimelS_DQGuQGsQGmQGmQGkQGv9numOtherslTQDqTQEdZQGbMFNbKQEgKQEtZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7setAttrMFNbPvkZ2goFNbPSQElQEjQEdQEdQEbQEm3GcxQBikZkS_DQFqQFoQFiQFiQFgQFr9otherTimelS_DQGwQGuQGoQGoQGmQGx9numOtherslTQDrTQEfTkZQGfMFNbKQEjKQExKkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy8minimizeMFNbZ2goFNbPSQEjQEhQEbQEbQDzQEk3GcxZvS_DQFkQFiQFcQFcQFaQFl9otherTimelS_DQGqQGoQGiQGiQGgQGr9numOtherslTQDnZQFtMFNbKQDzZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14SENTINEL_EXTRAxk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14baseOffsetBitsyG14G4m@Base 12
+ _D4core8internal2gc4impl12conservativeQw14bytesAllocatedm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14numCollectionsm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool10allocPagesMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool18setFreePageOffsetsMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool7getInfoMFNbPvZSQCt6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool7getSizeMxFNbNimZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool8getPagesMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool9freePagesMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool__T20mergeFreePageOffsetsVbi0Vbi1ZQBfMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool__T20mergeFreePageOffsetsVbi1Vbi1ZQBfMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool7getInfoMFNbPvZSQCt6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool7getSizeMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool9allocPageMFNbEQCsQCqQCkQCkQCiQCt4BinsZPSQDsQDqQDkQDkQDiQDt4List@Base 12
+ _D4core8internal2gc4impl12conservativeQw17maxCollectionTimeSQCg4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw18initialize_preciseFZCQCjQBy11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl12conservativeQw18sentinel_InvariantFNbNixPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10initializeMFZ23atforkHandlersInstalledb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10initializeMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10rootsApplyMFNbMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10smallAllocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11__xopEqualsMxFKxSQCjQChQCbQCbQBzQCkQBpZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11collectForkMFNbbZEQCkQCiQCc2os11ChildStatus@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11disableForkMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11fullcollectMFNbbbbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11rangesApplyMFNbMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11recoverPageMFNbPSQCjQChQCbQCbQBzQCk15SmallObjectPoolmEQDuQDsQDmQDmQDkQDv4BinsZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx12collectRootsMFNbNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx12markParallelMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx14scanBackgroundMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15collectAllRootsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15fork_needs_lockb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15recoverNextPageMFNbEQCmQCkQCeQCeQCcQCn4BinsZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15stopScanThreadsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx16startScanThreadsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx17collectInProgressMxFNbNdZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx17pullFromScanStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx18maxParallelThreadsMFNbZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx18setNextRecoverPoolMFNbEQCpQCnQChQChQCfQCq4BinsmZPSQDqQDoQDiQDiQDgQDr15SmallObjectPool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx19_d_gcx_atfork_childUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx20_d_gcx_atfork_parentUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx21_d_gcx_atfork_prepareUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZ11smoothDecayFNaNbNiNfffZf@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZ3maxFNaNbNiNfffZf@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx4DtorMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx5allocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx5sweepMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx6lowMemMxFNbNdZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7getInfoMFNbPvZSQCg6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7newPoolMFNbmbZPSQChQCfQBzQBzQBxQCi4Pool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7prepareMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8addRangeMFNbNiPvQcxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8bigAllocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8binTableyG2049EQCgQCeQByQByQBwQCh4Bins@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8ctfeBinsFNbZG2049EQCjQChQCbQCbQBzQCk4Bins@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findBaseMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findPoolMFNaNbNiPvZPSQCmQCkQCeQCeQCcQCn4Pool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findSizeMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8instancePSQCbQBzQBtQBtQBrQCcQBh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8isMarkedMFNbNlPvZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8markForkMFNbbbbZ13wrap_delegateUPvZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8markForkMFNbbbbZEQCiQCgQCa2os11ChildStatus@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9InvariantMxFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9__xtoHashFNbNeKxSQCiQCgQCaQCaQByQCjQBoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9allocPageMFNbEQCfQCdQBxQBxQBvQCg4BinsZPSQDfQDdQCxQCxQCvQDg4List@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr3popMFNaNbNiZQs@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr4pushMFNbNiQqZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr7opIndexMNgFNaNbNcNimZNgPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr8opAssignMFNaNbNcNiNjNeSQDkQDiQDcQDcQDaQDlQCq__TQCpTQCfZQCxZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr9popLockedMFNbNiKQwZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf3popMFNaNbNiZQCh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf4pushMFNbNiQCfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf7opIndexMNgFNaNbNcNimZNgSQFaQEyQEsQEsQEqQFbQEg__TQCsVbi0ZQDa@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf8opAssignMFNaNbNcNiNjNeSQEzQExQErQErQEpQFaQEf__TQEeTQDuZQEmZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf9popLockedMFNbNiKQClZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf3popMFNaNbNiZQCh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf4pushMFNbNiQCfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf7opIndexMNgFNaNbNcNimZNgSQFaQEyQEsQEsQEqQFbQEg__TQCsVbi1ZQDa@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf8opAssignMFNaNbNcNiNjNeSQEzQExQErQErQEpQFaQEf__TQEeTQDuZQEmZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf9popLockedMFNbNiKQClZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11markPreciseVbi0ZQsMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11markPreciseVbi1ZQsMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T16markConservativeVbi0ZQxMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T16markConservativeVbi1ZQxMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T21pullFromScanStackImplVbi0ZQBcMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T21pullFromScanStackImplVbi1ZQBcMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi0Vbi0ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi0Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi1Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi0Vbi0ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi0Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi1Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T11markPreciseVbi0ZQsMFNbNiNlPvQcZvZQCsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T11markPreciseVbi1ZQsMFNbNiNlPvQcZvZQCsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T16markConservativeVbi0ZQxMFNbNiNlPvQcZvZQCxMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T16markConservativeVbi1ZQxMFNbNiNlPvQcZvZQCxMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi0ZQp6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi1ZQp6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3setFNaNbNiKG4mmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool10initializeMFNbmbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool12freePageBitsMFNbmKxG4mZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool15freeAllPageBitsMFNbmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool16setPointerBitmapMFNbPvmmxC8TypeInfokZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool21setPointerBitmapSmallMFNbPvmmkxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool4DtorMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool6isFreeMxFNaNbNdNiNlNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7ShiftBy6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7clrBitsMFNbNimkZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7getBitsMFNbmZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7setBitsMFNbmkZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool8findBaseMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool8numPagesFNbNimZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9InvariantMxFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9pagenumOfMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9slGetInfoMFNbPvZSQCj6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9slGetSizeMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw7binbaseyG15G256s@Base 12
+ _D4core8internal2gc4impl12conservativeQw7binsizeyG15s@Base 12
+ _D4core8internal2gc4impl12conservativeQw8freeTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw8lockTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw8markTimeSQBw4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw8numFreesl@Base 12
+ _D4core8internal2gc4impl12conservativeQw8prepTimeSQBw4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw9numOthersl@Base 12
+ _D4core8internal2gc4impl12conservativeQw9otherTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw9pauseTimeSQBx4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw9sweepTimeSQBx4time8Duration@Base 12
+ _D4core8internal2gc4impl5protoQo11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl5protoQo12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC10rootsApplyMFMDFNbKSQChQBw11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11rangesApplyMFMDFNbKSQCiQBx11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC12profileStatsMFNbNiNfZSQCk6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC22transferRangesAndRootsMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC4DtorMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC5queryMFNbPvZSQCa6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC5statsMFNbNiNfZSQCc6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__ctorMFZCQBxQBvQBpQBpQBnQByQBl@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__initZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6callocMFNbmkMxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6extendMFNbPvmmMxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6mallocMFNbmkMxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6qallocMFNbmkMxC8TypeInfoZSQCn6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7reallocMFNbPvmkMxC8TypeInfoZQr@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8rootIterMFNdNiNjZDFMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC9rangeIterMFNdNiNjZDFMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp10initializeFZCQBuQBj11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl6manualQp11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl6manualQp12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10__aggrDtorMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10rootsApplyMFMDFNbKSQCjQBy11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11rangesApplyMFMDFNbKSQCkQBz11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC12profileStatsMFNbNiNfZSQCm6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC5queryMFNbPvZSQCc6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC5statsMFNbNiNfZSQCe6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__ctorMFZCQBzQBxQBrQBrQBpQCaQBm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__dtorMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__initZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6qallocMFNbmkMxC8TypeInfoZSQCp6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8rootIterMFNdNiNjZDFMDFNbKSQCpQCe11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC9rangeIterMFNdNiNjZDFMDFNbKSQCqQCf11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc5proxy11__moduleRefZ@Base 12
+ _D4core8internal2gc5proxy12__ModuleInfoZ@Base 12
+ _D4core8internal2gc5proxy12instanceLockOSQBnQBl8spinlock8SpinLock@Base 12
+ _D4core8internal2gc5proxy14isInstanceInitb@Base 12
+ _D4core8internal2gc5proxy8instanceFNbNiNeZCQBpQBe11gcinterface2GC@Base 12
+ _D4core8internal2gc5proxy9_instanceCQBiQx11gcinterface2GC@Base 12
+ _D4core8internal2gc5proxy9proxiedGCCQBiQx11gcinterface2GC@Base 12
+ _D4core8internal2gc9pooltable11__moduleRefZ@Base 12
+ _D4core8internal2gc9pooltable12__ModuleInfoZ@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx4DtorMFNbNiZv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6insertMFNbNiPQCdZb@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6lengthMxFNaNbNdNiNlNfZm@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7maxAddrMxFNaNbNdNiNfZPxv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7minAddrMxFNaNbNdNiNfZPxv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opIndexMNgFNaNbNcNiNjNemZNgPSQEiQEgQEaQCsQCqQEjQCf@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opSliceMNgFNaNbNiNjNeZANgPSQEgQEeQDyQCqQCoQEhQCd@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opSliceMNgFNaNbNiNjNemmZANgPSQEiQEgQEaQCsQCqQEjQCf@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8findPoolMFNaNbNiPvZPQCk@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8minimizeMFNaNbNiZ4swapFNaNbNiNfKPQCxKQfZv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8minimizeMFNaNbNiZAPQCj@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx9InvariantMxFNaNbNiZv@Base 12
+ _D4core8internal3utf10UTF8strideyAi@Base 12
+ _D4core8internal3utf10toUCSindexFNaNbNiNfMxAwmZm@Base 12
+ _D4core8internal3utf10toUCSindexFNaNfMxAamZm@Base 12
+ _D4core8internal3utf10toUCSindexFNaNfMxAumZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNbNiNfMxAumZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNbNiNfMxAwmZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNfMxAamZm@Base 12
+ _D4core8internal3utf11__moduleRefZ@Base 12
+ _D4core8internal3utf12__ModuleInfoZ@Base 12
+ _D4core8internal3utf12isValidDcharFNaNbNiNfwZb@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAaKmZw@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAuKmZw@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAwKmZw@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAawZv@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAuwZv@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAwwZv@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAamZk@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAumZk@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAwmZk@Base 12
+ _D4core8internal3utf6toUTF8FNaNbNfNkMAyaZQe@Base 12
+ _D4core8internal3utf6toUTF8FNaNbNiNfNkMAawZQe@Base 12
+ _D4core8internal3utf6toUTF8FNaNeMxAuZAya@Base 12
+ _D4core8internal3utf6toUTF8FNaNeMxAwZAya@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNeMxAwZAyu@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNfNkMAyuZQe@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNiNfNkMAuwZQe@Base 12
+ _D4core8internal3utf7toUTF16FNaNeMxAaZAyu@Base 12
+ _D4core8internal3utf7toUTF32FNaNbNfNkMAywZQe@Base 12
+ _D4core8internal3utf7toUTF32FNaNeMxAaZAyw@Base 12
+ _D4core8internal3utf7toUTF32FNaNeMxAuZAyw@Base 12
+ _D4core8internal3utf8toUTF16zFNaNfMxAaZPxu@Base 12
+ _D4core8internal3utf__T8validateTAyaZQoFNaNfMxAyaZv@Base 12
+ _D4core8internal3utf__T8validateTAyuZQoFNaNfMxAyuZv@Base 12
+ _D4core8internal3utf__T8validateTAywZQoFNaNfMxAywZv@Base 12
+ _D4core8internal4hash11__moduleRefZ@Base 12
+ _D4core8internal4hash12__ModuleInfoZ@Base 12
+ _D4core8internal4hash__T13coalesceFloatTdZQsFNaNbNiNfxdZd@Base 12
+ _D4core8internal4hash__T13coalesceFloatTeZQsFNaNbNiNfxeZe@Base 12
+ _D4core8internal4hash__T13coalesceFloatTfZQsFNaNbNiNfxfZf@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility16__c_complex_realZQBuFNaNbNiNfQBymZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility17__c_complex_floatZQBvFNaNbNiNfQBzmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility18__c_complex_doubleZQBwFNaNbNiNfQCamZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxPvZQnFNaNbNiNfMxAQrmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxPyS6object10ModuleInfoZQBhFNaNbNiNfMxAQBmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxaZQmFNaNbNiNfMxAamZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxdZQmFNaNbNiNfMxAdmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxeZQmFNaNbNiNfMxAemZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxfZQmFNaNbNiNfMxAfmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxhZQmFNaNbNiNfMxAhmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxkZQmFNaNbNiNfMxAkmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxmZQmFNaNbNiNfMxAmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxtZQmFNaNbNiNfMxAtmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxvZQmFNaNbNiNfMxAvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAyaZQmFNaNbNiNfMxAyamZm@Base 12
+ _D4core8internal4hash__T6hashOfTDFZvZQnFNaNbNiNeMxDQsmZm@Base 12
+ _D4core8internal4hash__T6hashOfTPvZQlFNaNbNiNeMxPvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTPxvZQmFNaNbNiNeMxPvZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTdZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTeZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTfZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTdZQkFNaNbNiNexdZm@Base 12
+ _D4core8internal4hash__T6hashOfTdZQkFNaNbNiNexdmZm@Base 12
+ _D4core8internal4hash__T6hashOfTeZQkFNaNbNiNexeZm@Base 12
+ _D4core8internal4hash__T6hashOfTeZQkFNaNbNiNexemZm@Base 12
+ _D4core8internal4hash__T6hashOfTfZQkFNaNbNiNexfZm@Base 12
+ _D4core8internal4hash__T6hashOfTfZQkFNaNbNiNexfmZm@Base 12
+ _D4core8internal4hash__T6hashOfThZQkFNaNbNiNexhZm@Base 12
+ _D4core8internal4hash__T6hashOfTkZQkFNaNbNiNexkZm@Base 12
+ _D4core8internal4hash__T6hashOfTkZQkFNaNbNiNexkmZm@Base 12
+ _D4core8internal4hash__T6hashOfTmZQkFNaNbNiNexmZm@Base 12
+ _D4core8internal4hash__T6hashOfTmZQkFNaNbNiNexmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTtZQkFNaNbNiNextZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility16__c_complex_realZQBtFNaNbNiNfKxQBymZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility17__c_complex_floatZQBuFNaNbNiNfKxQBzmZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility18__c_complex_doubleZQBvFNaNbNiNfKxQCamZm@Base 12
+ _D4core8internal4hash__T9bytesHashVbi0ZQpFNaNbNiNeMAxhmZm@Base 12
+ _D4core8internal4hash__T9bytesHashVbi1ZQpFNaNbNiNeMAxhmZm@Base 12
+ _D4core8internal4hash__T9get32bitsZQlFNaNbNiMPxhZk@Base 12
+ _D4core8internal4util4math11__moduleRefZ@Base 12
+ _D4core8internal4util4math12__ModuleInfoZ@Base 12
+ _D4core8internal4util4math__T3maxTmZQhFNaNbNiNfmmZm@Base 12
+ _D4core8internal4util4math__T3minTkZQhFNaNbNiNfkkZk@Base 12
+ _D4core8internal4util5array10arrayToPtrFNbNexAvZm@Base 12
+ _D4core8internal4util5array11__moduleRefZ@Base 12
+ _D4core8internal4util5array12__ModuleInfoZ@Base 12
+ _D4core8internal4util5array17_enforceNoOverlapFNbNfxAammxmZv@Base 12
+ _D4core8internal4util5array18_enforceSameLengthFNbNfxAaxmxmZv@Base 12
+ _D4core8internal4util5array21_enforceNoOverlapNogcFNbNfKxAammxmZv@Base 12
+ _D4core8internal4util5array22_enforceSameLengthNogcFNbNfKxAaxmxmZv@Base 12
+ _D4core8internal4util5array27enforceRawArraysConformableFNbNfxAaxmxAvxQdxbZv@Base 12
+ _D4core8internal4util5array31enforceRawArraysConformableNogcFNbNfxAaxmxAvxQdxbZv@Base 12
+ _D4core8internal4util5array6_storeG256a@Base 12
+ _D4core8internal4util5array__T12errorMessageTxmTxmZQvFNbNiNeMxPaxAaxmxmZAa@Base 12
+ _D4core8internal5abort11__moduleRefZ@Base 12
+ _D4core8internal5abort12__ModuleInfoZ@Base 12
+ _D4core8internal5abortQgFNbNiNfMAyaMQemZ8writeStrFNbNiNeMAAxaXv@Base 12
+ _D4core8internal5abortQgFNbNiNfMAyaMQemZv@Base 12
+ _D4core8internal5array10comparison11__moduleRefZ@Base 12
+ _D4core8internal5array10comparison12__ModuleInfoZ@Base 12
+ _D4core8internal5array10comparison__T5__cmpTaZQjFNaNbNiNeMxAaMxQeZi@Base 12
+ _D4core8internal5array10operations10isBinaryOpFNaNbNiNfMAyaZb@Base 12
+ _D4core8internal5array10operations11__moduleRefZ@Base 12
+ _D4core8internal5array10operations12__ModuleInfoZ@Base 12
+ _D4core8internal5array10operations16isBinaryAssignOpFAyaZb@Base 12
+ _D4core8internal5array10operations8toStringFmZAya@Base 12
+ _D4core8internal5array10operations9isUnaryOpFNaNbNiNfMAyaZb@Base 12
+ _D4core8internal5array12construction11__moduleRefZ@Base 12
+ _D4core8internal5array12construction12__ModuleInfoZ@Base 12
+ _D4core8internal5array13concatenation11__moduleRefZ@Base 12
+ _D4core8internal5array13concatenation12__ModuleInfoZ@Base 12
+ _D4core8internal5array5utils11__moduleRefZ@Base 12
+ _D4core8internal5array5utils11gcStatsPureFNaNbZSQBu6memory2GC5Stats@Base 12
+ _D4core8internal5array5utils12__ModuleInfoZ@Base 12
+ _D4core8internal5array5utils14accumulatePureFNaNbAyaiQeQgmZ12impureBypassFNbNiQBdiQBhQBkmZm@Base 12
+ _D4core8internal5array5utils14accumulatePureFNaNbAyaiQeQgmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTACQBy6thread10threadbase10ThreadBaseS_DQDjQDhQDb8capacity__T22_d_arraysetlengthTImplHTQDiTQDlZ18_d_arraysetlengthTFNaNbNeNkMKQEvmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQMfFNaNbNeQGuiQGyNkMKQMhmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOaS_DQCcQCaQBu9appending__T19_d_arrayappendTImplHTQBzTOaZ15_d_arrayappendTFNaNbNcNeMNkKQDkMQDoZQDsVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLdFNaNbNeQGyiQHcMNkKQLfMQLjZQLn@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOaS_DQCcQCaQBu9appending__T21_d_arrayappendcTXImplHTQCbTOaZ17_d_arrayappendcTXFNaNbNcNeMNkKQDomZQDtVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLeFNaNbNeQGyiQHcMNkKQLgmZQLl@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOuS_DQCcQCaQBu9appending__T19_d_arrayappendTImplHTQBzTOuZ15_d_arrayappendTFNaNbNcNeMNkKQDkMQDoZQDsVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLdFNaNbNeQGyiQHcMNkKQLfMQLjZQLn@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOuS_DQCcQCaQBu9appending__T21_d_arrayappendcTXImplHTQCbTOuZ17_d_arrayappendcTXFNaNbNcNeMNkKQDomZQDtVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLeFNaNbNeQGyiQHcMNkKQLgmZQLl@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAaS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTaZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAwS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTwZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array7casting11__moduleRefZ@Base 12
+ _D4core8internal5array7casting12__ModuleInfoZ@Base 12
+ _D4core8internal5array8capacity11__moduleRefZ@Base 12
+ _D4core8internal5array8capacity12__ModuleInfoZ@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTACQCi6thread10threadbase10ThreadBaseTQBkZ18_d_arraysetlengthTFNaNbNeNkMKQCumZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAaTaZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAwTwZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8equality11__moduleRefZ@Base 12
+ _D4core8internal5array8equality12__ModuleInfoZ@Base 12
+ _D4core8internal5array8equality__T8__equalsTPxvTQeZQrFNaNbNiNeMxAPvMxQfZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTPyS6object10ModuleInfoTQxZQBkFNaNbNiNeMxAPyQBpMxQiZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTaTaZQoFNaNbNiNeMxAaMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTdTdZQoFNaNbNiNeMxAdMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTeTeZQoFNaNbNiNeMxAeMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTfTfZQoFNaNbNiNeMxAfMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsThThZQoFNaNbNiNeMxAhMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTiTiZQoFNaNbNiNeMxAiMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTkTkZQoFNaNbNiNeMxAkMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTmTmZQoFNaNbNiNeMxAmMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTtTtZQoFNaNbNiNeMxAtMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxAyaTxQfZQtFNaNbNiNfMAxQwMQfZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxDFNbC6ObjectZvTxQqZQBeFNaNbNiNfMAxQBiMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility16__c_complex_realTxQBlZQCaFNaNbNiNfMAxQCeMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility17__c_complex_floatTxQBmZQCbFNaNbNiNfMAxQCfMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility18__c_complex_doubleTxQBnZQCcFNaNbNiNfMAxQCgMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS2rt3aaA6BucketTxQrZQBfFNaNbNiNfMAxQBjMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxSQBs8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11ReplacementTxQCvZQDkFNaNbNiNfMAxQDoMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxvTxvZQqFNaNbNiNfMAxvMQeZb@Base 12
+ _D4core8internal5array9appending11__moduleRefZ@Base 12
+ _D4core8internal5array9appending12__ModuleInfoZ@Base 12
+ _D4core8internal5array9appending__T19_d_arrayappendTImplHTAOaTOaZ15_d_arrayappendTFNaNbNcNeMNkKQBlMQBpZQBt@Base 12
+ _D4core8internal5array9appending__T19_d_arrayappendTImplHTAOuTOuZ15_d_arrayappendTFNaNbNcNeMNkKQBlMQBpZQBt@Base 12
+ _D4core8internal5array9appending__T21_d_arrayappendcTXImplHTAOaTOaZ17_d_arrayappendcTXFNaNbNcNeMNkKQBnmZQBs@Base 12
+ _D4core8internal5array9appending__T21_d_arrayappendcTXImplHTAOuTOuZ17_d_arrayappendcTXFNaNbNcNeMNkKQBnmZQBs@Base 12
+ _D4core8internal5qsort11__moduleRefZ@Base 12
+ _D4core8internal5qsort12__ModuleInfoZ@Base 12
+ _D4core8internal5qsort7_adSortUNkMAvC8TypeInfoZ3cmpUMxPvMxQeMPvZi@Base 12
+ _D4core8internal6atomic11__moduleRefZ@Base 12
+ _D4core8internal6atomic12__ModuleInfoZ@Base 12
+ _D4core8internal6atomic12simpleFormatFAyaMAQfZQi@Base 12
+ _D4core8internal6atomic5pauseFNaNbNiNeZv@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNePNgPONgSQCcQCcQBvZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi0TmZQBmFNaNbNiNePNgmZNgm@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNePNgPONgSQBpQBpQBjZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNePNgPONgSQCcQCcQBvZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TbZQBmFNaNbNiNePNgbZNgb@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TiZQBmFNaNbNiNePNgiZNgi@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TkZQBmFNaNbNiNePNgkZNgk@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi0TbZQBnFNaNbNiNePbbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi0TmZQBnFNaNbNiNePmmZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorZQCjFNaNbNiNePQBlQBoZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONZQCwFNaNbNiNePQByQCbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TmZQBnFNaNbNiNePmmZv@Base 12
+ _D4core8internal6atomic__T14atomicFetchAddVEQBqQBf11MemoryOrderi5Vbi1TkZQBuFNaNbNiNePkkZk@Base 12
+ _D4core8internal6atomic__T14atomicFetchAddVEQBqQBf11MemoryOrderi5Vbi1TmZQBuFNaNbNiNePmmZm@Base 12
+ _D4core8internal6atomic__T14atomicFetchSubVEQBqQBf11MemoryOrderi5Vbi1TkZQBuFNaNbNiNePkkZk@Base 12
+ _D4core8internal6atomic__T14atomicFetchSubVEQBqQBf11MemoryOrderi5Vbi1TmZQBuFNaNbNiNePmmZm@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0ThZQCkFNaNbNiNePhQchZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TmZQCkFNaNbNiNePmQcmZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TtZQCkFNaNbNiNePtQctZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5ThZQCqFNaNbNiNePhxhhZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TmZQCqFNaNbNiNePmxmmZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TtZQCqFNaNbNiNePtxttZb@Base 12
+ _D4core8internal6moving11__moduleRefZ@Base 12
+ _D4core8internal6moving12__ModuleInfoZ@Base 12
+ _D4core8internal6string11__moduleRefZ@Base 12
+ _D4core8internal6string12__ModuleInfoZ@Base 12
+ _D4core8internal6string__T17TempStringNoAllocVhi20ZQz3getMNgFNaNbNiNjNfZANga@Base 12
+ _D4core8internal6string__T17TempStringNoAllocVhi20ZQz6__initZ@Base 12
+ _D4core8internal6string__T18signedToTempStringVki10ZQBaFNaNbNiNflZSQCnQClQCf__T17TempStringNoAllocVhi20ZQz@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVii10ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVii16ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVki10ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVki10ZQBcFNaNbNiNfmZSQCpQCnQCh__T17TempStringNoAllocVhi20ZQz@Base 12
+ _D4core8internal6string__T7dstrcmpZQjFNaNbNiNeMxAaMxQeZi@Base 12
+ _D4core8internal6string__T9numDigitsVki10ZQqFNaNbNiNfmZi@Base 12
+ _D4core8internal6traits11__moduleRefZ@Base 12
+ _D4core8internal6traits12__ModuleInfoZ@Base 12
+ _D4core8internal6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D4core8internal7convert10ctfe_allocFNaNbNiNemZ5allocFNaNbNfmZAh@Base 12
+ _D4core8internal7convert10ctfe_allocFNaNbNiNemZAh@Base 12
+ _D4core8internal7convert11__moduleRefZ@Base 12
+ _D4core8internal7convert11shiftrRoundFNaNbNiNfmZm@Base 12
+ _D4core8internal7convert12__ModuleInfoZ@Base 12
+ _D4core8internal7convert5Float6__initZ@Base 12
+ _D4core8internal7convert7binPow2FNaNbNiNfiZ10binPosPow2FNaNbNiNfiZe@Base 12
+ _D4core8internal7convert7binPow2FNaNbNiNfiZe@Base 12
+ _D4core8internal7convert__T20denormalizedMantissaTeZQzFNaNbNiNfekZSQCnQClQCf5Float@Base 12
+ _D4core8internal7convert__T5parseVbi0HTeZQoFNaNbNiNfeZSQCbQBzQBt5Float@Base 12
+ _D4core8internal7convert__T5parseVbi0HTxeZQpFNaNbNiNfxeZSQCdQCbQBv5Float@Base 12
+ _D4core8internal7convert__T7binLog2TeZQlFNaNbNiNfxeZk@Base 12
+ _D4core8internal7convert__T7toUbyteTPxvZQnFNaNbNiNeNkMxAPvZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTPyS6object10ModuleInfoZQBgFNaNbNiNeNkMxAPyQBoZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTaZQlFNaNbNiNeNkMxAaZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTeZQlFNaNbNiNeKxeZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteThZQlFNaNbNiNeNkMxAhZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTkZQlFNaNbNiNeKxkZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTkZQlFNaNbNiNeNkMxAkZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTmZQlFNaNbNiNeKxmZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTmZQlFNaNbNiNeNkMxAmZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTtZQlFNaNbNiNeKxtZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTtZQlFNaNbNiNeNkMxAtZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTvZQlFNaNbNiNeNkMxAvZAxh@Base 12
+ _D4core8internal7dassert11__moduleRefZ@Base 12
+ _D4core8internal7dassert12__ModuleInfoZ@Base 12
+ _D4core8internal7dassert15invertCompTokenFNaNbNiNfMAyaZQe@Base 12
+ _D4core8internal7dassert16calcFieldOverlapFMxAmZAb@Base 12
+ _D4core8internal7dassert7combineFNaNbNiNfMxAAyaMxQfMxQkZ11formatTupleFNaNbNiNfMAaKmIQBpIbZv@Base 12
+ _D4core8internal7dassert7combineFNaNbNiNfMxAAyaMxQfMxQkZAya@Base 12
+ _D4core8internal7dassert9pureAllocFNaNbNiNfmZAh@Base 12
+ _D4core8internal7dassert9pureAllocFmZ5allocFNaNbNfmZAh@Base 12
+ _D4core8internal7dassert__T20assumeFakeAttributesTPFNaNbNfmZAhZQBkFNaNbNiNeQzZPFNaNbNiNfmZQBe@Base 12
+ _D4core8internal7switch_11__moduleRefZ@Base 12
+ _D4core8internal7switch_12__ModuleInfoZ@Base 12
+ _D4core8internal7switch___T14__switchSearchTyaZQuFNaNbNiNfMxAAyaMxQfZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia5_61626f7274VxQza5_7072696e74VxQBqa6_69676e6f7265VxQCka9_646570726563617465ZQDxFNaNbNiNfMxQDxZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia5_61626f7274ZQBmFNaNbNiNfMxQBmZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia7_70726563697365VxQBda12_636f6e736572766174697665ZQCxFNaNbNiNfMxQCxZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia8_72756e2d6d61696eVxQBfa9_746573742d6f6e6c79VxQCfa12_746573742d6f722d6d61696eZQDzFNaNbNiNfMxQDzZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia8_72756e2d6d61696eZQBsFNaNbNiNfMxQBsZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_ZQvFNaNbNiNfMxQuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa12_636f6e736572766174697665ZQBuFNaNbNiNfMxQBuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa12_746573742d6f722d6d61696eZQBuFNaNbNiNfMxQBuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa1_3cVxQka1_3eVxQta2_213dVxQBea2_3c3dVxQBqa2_3d3dVxQCca2_3e3dVxQCoa2_696eVxQDaa2_6973VxQDma3_21696eVxQEaa3_216973ZQFbFNaNbNiNfMxQFbZ5casesyG10Aa@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa1_3cVxQka1_3eVxQta2_213dVxQBea2_3c3dVxQBqa2_3d3dVxQCca2_3e3dVxQCoa2_696eVxQDaa2_6973VxQDma3_21696eVxQEaa3_216973ZQFbFNaNbNiNfMxQFbZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa2_6763VxQma4_666f726bVxQBba7_636c65616e7570VxQBxa7_64697361626c65VxQCta7_70726f66696c65VxQDpa8_706172616c6c656cVxQEna11_696e63506f6f6c53697a65VxQFsa11_696e697452657365727665VxQGxa11_6d6178506f6f6c53697a65VxQIca11_6d696e506f6f6c53697a65VxQJha14_6865617053697a65466163746f72ZQLfFNaNbNiNfMxQLfZ5casesyG11Aa@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa2_6763VxQma4_666f726bVxQBba7_636c65616e7570VxQBxa7_64697361626c65VxQCta7_70726f66696c65VxQDpa8_706172616c6c656cVxQEna11_696e63506f6f6c53697a65VxQFsa11_696e697452657365727665VxQGxa11_6d6178506f6f6c53697a65VxQIca11_6d696e506f6f6c53697a65VxQJha14_6865617053697a65466163746f72ZQLfFNaNbNiNfMxQLfZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa4_6e6f6e65VxQqa7_636f6c6c656374VxQBla8_66696e616c697a65ZQCwFNaNbNiNfMxQCwZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa4_6e6f6e65ZQBdFNaNbNiNfMxQBdZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa6_69676e6f7265VxQua9_646570726563617465ZQCgFNaNbNiNfMxQCgZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa6_69676e6f7265ZQBhFNaNbNiNfMxQBhZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa8_66696e616c697a65ZQBlFNaNbNiNfMxQBlZi@Base 12
+ _D4core8internal7switch___T8__switchTyaZQnFNaNbNiNfMxAyaZi@Base 12
+ _D4core8internal8lifetime11__moduleRefZ@Base 12
+ _D4core8internal8lifetime12__ModuleInfoZ@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3gcc8sections3elf9ThreadDSOZQByFNaNbNiNeMKQBrZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBw2gc11gcinterface4RootZQBvFNaNbNiNeMKQBoZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBw2gc11gcinterface5RangeZQBwFNaNbNiNeMKQBpZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDoFNaNbNiNeMKQDhZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDqFNaNbNiNeMKQDjZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDoFNaNbNiNeMKQDhZv@Base 12
+ _D4core8internal8postblit11__moduleRefZ@Base 12
+ _D4core8internal8postblit12__ModuleInfoZ@Base 12
+ _D4core8internal8spinlock11__moduleRefZ@Base 12
+ _D4core8internal8spinlock12__ModuleInfoZ@Base 12
+ _D4core8internal8spinlock15AlignedSpinLock6__ctorMOFNbNcNiNeEQChQCfQBz8SpinLock10ContentionZOSQDoQDmQDgQDa@Base 12
+ _D4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D4core8internal8spinlock8SpinLock4lockMOFNbNiNeZv@Base 12
+ _D4core8internal8spinlock8SpinLock5yieldMOFNbNiNemZv@Base 12
+ _D4core8internal8spinlock8SpinLock6__ctorMOFNbNcNiNeEQBzQBxQBrQBl10ContentionZOSQDaQCyQCsQCm@Base 12
+ _D4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D4core8internal8spinlock8SpinLock6unlockMOFNbNiNeZv@Base 12
+ _D4core8internal9container5array11__moduleRefZ@Base 12
+ _D4core8internal9container5array12__ModuleInfoZ@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk4backMNgFNaNbNcNdNiZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk4swapMFNaNbNiNfKSQCkQCiQCcQBv__TQBsTQBpZQCaZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5frontMNgFNaNbNcNdNiNfZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opIndexMNgFNaNbNcNimZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opSliceMNgFNaNbNiZANgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opSliceMNgFNaNbNimmZANgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk8opAssignMFNbNcNiNjSQCnQClQCfQBy__TQBvTQBsZQCdZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk__T10insertBackZQnMFNbNiQBdZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf4backMNgFNaNbNcNdNiZNgPSQBxQBwQBqQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf4swapMFNaNbNiNfKSQDgQDeQCyQCr__TQCoTQClZQCwZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5frontMNgFNaNbNcNdNiNfZNgPSQCaQBzQBtQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opIndexMNgFNaNbNcNimZNgPSQBzQByQBsQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opSliceMNgFNaNbNiZANgPSQBxQBwQBqQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opSliceMNgFNaNbNimmZANgPSQBzQByQBsQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf8opAssignMFNbNcNiNjSQDjQDhQDbQCu__TQCrTQCoZQCzZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf__T10insertBackZQnMFNbNiKQCaZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu4backMNgFNaNbNcNdNiZNgPSQFcQFaQEuQDm__TQDhTQDcTQDcZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu4swapMFNaNbNiNfKSQEvQEtQEnQEg__TQEdTQEaZQElZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5frontMNgFNaNbNcNdNiNfZNgPSQFfQFdQExQDp__TQDkTQDfTQDfZQDwQCo@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opIndexMNgFNaNbNcNimZNgPSQFeQFcQEwQDo__TQDjTQDeTQDeZQDvQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opSliceMNgFNaNbNiZANgPSQFcQFaQEuQDm__TQDhTQDcTQDcZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opSliceMNgFNaNbNimmZANgPSQFeQFcQEwQDo__TQDjTQDeTQDeZQDvQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu8opAssignMFNbNcNiNjSQEyQEwQEqQEj__TQEgTQEdZQEoZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw4backMNgFNaNbNcNdNiZNgPSQFeQFcQEwQDo__TQDjTQDeTQDfZQDvQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw4swapMFNaNbNiNfKSQExQEvQEpQEi__TQEfTQEcZQEnZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5frontMNgFNaNbNcNdNiNfZNgPSQFhQFfQEzQDr__TQDmTQDhTQDiZQDyQCo@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opIndexMNgFNaNbNcNimZNgPSQFgQFeQEyQDq__TQDlTQDgTQDhZQDxQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opSliceMNgFNaNbNiZANgPSQFeQFcQEwQDo__TQDjTQDeTQDfZQDvQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opSliceMNgFNaNbNimmZANgPSQFgQFeQEyQDq__TQDlTQDgTQDhZQDxQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw8opAssignMFNbNcNiNjSQFaQEyQEsQEl__TQEiTQEfZQEqZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu4backMNgFNaNbNcNdNiZNgPSQFcQFaQEuQDm__TQDhTQDcTiZQDrQCj@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu4swapMFNaNbNiNfKSQEvQEtQEnQEg__TQEdTQEaZQElZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5frontMNgFNaNbNcNdNiNfZNgPSQFfQFdQExQDp__TQDkTQDfTiZQDuQCm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opIndexMNgFNaNbNcNimZNgPSQFeQFcQEwQDo__TQDjTQDeTiZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opSliceMNgFNaNbNiZANgPSQFcQFaQEuQDm__TQDhTQDcTiZQDrQCj@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opSliceMNgFNaNbNimmZANgPSQFeQFcQEwQDo__TQDjTQDeTiZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu8opAssignMFNbNcNiNjSQEyQEwQEqQEj__TQEgTQEdZQEoZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk4backMNgFNaNbNcNdNiZNgSQCcQCbQBvQBu@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk4swapMFNaNbNiNfKSQDlQDjQDdQCw__TQCtTQCqZQDbZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5frontMNgFNaNbNcNdNiNfZNgSQCfQCeQByQBx@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opIndexMNgFNaNbNcNimZNgSQCeQCdQBxQBw@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opSliceMNgFNaNbNiZANgSQCcQCbQBvQBu@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opSliceMNgFNaNbNimmZANgSQCeQCdQBxQBw@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCwTQCtZQDeZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk__T10insertBackZQnMFNbNiQCeZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh4backMNgFNaNbNcNdNiZNgSQDoQBzQBzQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh4swapMFNaNbNiNfKSQDiQDgQDaQCt__TQCqTQCnZQCyZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5frontMNgFNaNbNcNdNiNfZNgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opIndexMNgFNaNbNcNimZNgSQDqQCbQCbQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opSliceMNgFNaNbNiZANgSQDoQBzQBzQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opSliceMNgFNaNbNimmZANgSQDqQCbQCbQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh8opAssignMFNbNcNiNjSQDlQDjQDdQCw__TQCtTQCqZQDbZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh__T10insertBackZQnMFNbNiQCbZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi4backMNgFNaNbNcNdNiZNgSQDpQCaQCaQBq@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi4swapMFNaNbNiNfKSQDjQDhQDbQCu__TQCrTQCoZQCzZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5frontMNgFNaNbNcNdNiNfZNgSQDsQCdQCdQBt@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opIndexMNgFNaNbNcNimZNgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opSliceMNgFNaNbNiZANgSQDpQCaQCaQBq@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opSliceMNgFNaNbNimmZANgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi8opAssignMFNbNcNiNjSQDmQDkQDeQCx__TQCuTQCrZQDcZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi__T10insertBackZQnMFNbNiQCcZv@Base 12
+ _D4core8internal9container5treap11__moduleRefZ@Base 12
+ _D4core8internal9container5treap12__ModuleInfoZ@Base 12
+ _D4core8internal9container5treap4Rand5frontMFNaNbNdNiNfZk@Base 12
+ _D4core8internal9container5treap4Rand6__initZ@Base 12
+ _D4core8internal9container5treap4Rand6opCallMFNaNbNiNfZk@Base 12
+ _D4core8internal9container5treap4Rand8popFrontMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh10initializeMFNaNbNiNfmZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh13opApplyHelperFNbxPSQDmQDkQDeQCx__TQCuTQCrZQDc4NodeMDFNbKxSQEzQDkQDkQDaZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh4Node6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6insertMFNbNiPSQDgQDeQCyQCr__TQCoTQClZQCw4NodeQCxZQBl@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6insertMFNbNiQBqZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6removeFNbNiPPSQDgQDeQCyQCr__TQCoTQClZQCw4NodeQCxZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6removeMFNbNiQBqZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7opApplyMFNbMDFNbKQBvZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7opApplyMxFNbMDFNbKxSQDmQBxQBxQBnZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7rotateLFNaNbNiNfPSQDkQDiQDcQCv__TQCsTQCpZQDa4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7rotateRFNaNbNiNfPSQDkQDiQDcQCv__TQCsTQCpZQDa4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh8freeNodeFNbNiPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh8opAssignMFNbNcNiNjSQDlQDjQDdQCw__TQCtTQCqZQDbZQBc@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9allocNodeMFNbNiQBtZPSQDnQDlQDfQCy__TQCvTQCsZQDd4Node@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9removeAllFNbNiPSQDiQDgQDaQCt__TQCqTQCnZQCy4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9removeAllMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi10initializeMFNaNbNiNfmZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi13opApplyHelperFNbxPSQDnQDlQDfQCy__TQCvTQCsZQDd4NodeMDFNbKxSQFaQDlQDlQDbZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node11__xopEqualsMxFKxSQDqQDoQDiQDb__TQCyTQCvZQDgQByZb@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node9__xtoHashFNbNeKxSQDpQDnQDhQDa__TQCxTQCuZQDfQBxZm@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6insertMFNbNiPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeQCyZQBl@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6insertMFNbNiQBrZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6removeFNbNiPPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeQCyZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6removeMFNbNiQBrZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7opApplyMFNbMDFNbKQBwZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7opApplyMxFNbMDFNbKxSQDnQByQByQBoZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7rotateLFNaNbNiNfPSQDlQDjQDdQCw__TQCtTQCqZQDb4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7rotateRFNaNbNiNfPSQDlQDjQDdQCw__TQCtTQCqZQDb4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi8freeNodeFNbNiPSQDiQDgQDaQCt__TQCqTQCnZQCy4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi8opAssignMFNbNcNiNjSQDmQDkQDeQCx__TQCuTQCrZQDcZQBc@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9allocNodeMFNbNiQBuZPSQDoQDmQDgQCz__TQCwTQCtZQDe4Node@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9removeAllFNbNiPSQDjQDhQDbQCu__TQCrTQCoZQCz4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9removeAllMFNbNiZv@Base 12
+ _D4core8internal9container6common11__moduleRefZ@Base 12
+ _D4core8internal9container6common12__ModuleInfoZ@Base 12
+ _D4core8internal9container6common7xmallocFNbNimZPv@Base 12
+ _D4core8internal9container6common8xreallocFNbNiPvmZQe@Base 12
+ _D4core8internal9container6common__T10initializeTAvZQqFNaNbNiNfKQpZv@Base 12
+ _D4core8internal9container6common__T10initializeTPS3gcc8sections3elf3DSOZQBlFNaNbNiNfKQBlZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDaFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDcFNaNbNiNfKQDcZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDaFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T10initializeTS3gcc8sections3elf9ThreadDSOZQBqFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBw2gc11gcinterface4RootZQBnFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBw2gc11gcinterface5RangeZQBoFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCzFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDbFNaNbNiNfKQDbZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCzFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T7destroyTAvZQmFNaNbNiNfKQpZv@Base 12
+ _D4core8internal9container6common__T7destroyTPS3gcc8sections3elf3DSOZQBhFNaNbNiNfKQBlZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCwFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCyFNaNbNiNfKQDcZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCwFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T7destroyTS3gcc8sections3elf9ThreadDSOZQBmFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBs2gc11gcinterface4RootZQBjFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBs2gc11gcinterface5RangeZQBkFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCvFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCxFNaNbNiNfKQDbZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCvFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container7hashtab11__moduleRefZ@Base 12
+ _D4core8internal9container7hashtab12__ModuleInfoZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi13opIndexAssignMFNbNiQBtQCaZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi3getMFNbNiQBmZPQBn@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node11__xopEqualsMxFKxSQDsQDqQDkQDd__TQCyTQCtTQCtZQDkQCcZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node9__xtoHashFNbNeKxSQDrQDpQDjQDc__TQCxTQCsTQCsZQDjQCbZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6hashOfFNaNbNiNeMKxAaZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6removeMFNbNiIAaZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi7opApplyMFMDFKQBqKQBqZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi7opIndexMNgFNaNbNcNiQBwZNgSQByQByQBr@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCuTQCpTQCpZQDgZQBg@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxAaZPNgSQCxQCxQCq@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk13opIndexAssignMFNbNiQBwQCcZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk3getMFNbNiQBoZPQBq@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6hashOfFNaNbNiNeMKxPvZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6removeMFNbNiIPvZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk7opApplyMFMDFKQBsKQBtZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk7opIndexMNgFNaNbNcNiQByZNgPSQCbQCaQBuQBt@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk8opAssignMFNbNcNiNjSQDqQDoQDiQDb__TQCwTQCrTQCsZQDiZQBg@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxPvZPNgPSQDaQCzQCtQCs@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi13opIndexAssignMFNbNiiQByZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi3getMFNbNiQBmZPi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6hashOfFNaNbNiNeMKxPyQBvZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6removeMFNbNiIPyQBqZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi7opApplyMFMDFKQBqKiZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi7opIndexMNgFNaNbNcNiQBwZNgi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCuTQCpTiZQDeZQBe@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxPyQCvZPNgi@Base 12
+ _D4core8lifetime11__moduleRefZ@Base 12
+ _D4core8lifetime12__ModuleInfoZ@Base 12
+ _D4core8lifetime__T7emplaceTCQBb4sync5mutex5MutexZQBfFNbNiAvZQBh@Base 12
+ _D4core8lifetime__T7emplaceTCQBb4sync5mutex5MutexZQBfFNbNiNfQBgZQBk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb8internal2gc4impl12conservativeQw14ConservativeGCZQClFQCgZQCk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb8internal2gc4impl6manualQp8ManualGCZQBxFQBsZQBw@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception10RangeErrorTAyaTmTnZQBsFNaNbNiNfQBvKQyKmKQxZQCh@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception11AssertErrorTAyaTQeTmZQBuFNaNbNiNfQBxKQzKQBcKmZQCk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception11AssertErrorTAyaTmZQBrFNaNbNiNfQBuKQwKmZQCd@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCxFNaNbNiNfQDaKQCaKQBtKQBeKmZQDs@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception15ArrayIndexErrorTmTmTAyaTmTnZQCbFNaNbNiNfQCeKmKmKQBcKmKQBcZQCw@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception15ArraySliceErrorTmTmTmTAyaTmTnZQCdFNaNbNiNfQCgKmKmKmKQBeKmKQBeZQDa@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception16OutOfMemoryErrorTbZQBsFNaNbNiNfQBvKbZQCb@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception16OutOfMemoryErrorZQBqFNaNbNiNfQBtZQBx@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception27InvalidMemoryOperationErrorZQCbFNaNbNiNfQCeZQCi@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception9ForkErrorTAyaTmTnZQBqFNaNbNiNfQBtKQyKmKQxZQCf@Base 12
+ _D4core8volatile11__moduleRefZ@Base 12
+ _D4core8volatile12__ModuleInfoZ@Base 12
+ _D4core9attribute11__moduleRefZ@Base 12
+ _D4core9attribute12__ModuleInfoZ@Base 12
+ _D4core9attribute9gnuAbiTag11__xopEqualsMxFKxSQBsQBqQBjZb@Base 12
+ _D4core9attribute9gnuAbiTag6__ctorMFNcAAyaXSQBqQBoQBh@Base 12
+ _D4core9attribute9gnuAbiTag6__initZ@Base 12
+ _D4core9attribute9gnuAbiTag9__xtoHashFNbNeKxSQBrQBpQBiZm@Base 12
+ _D4core9exception10RangeError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCsQCqQCj@Base 12
+ _D4core9exception10RangeError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCqQCoQCh@Base 12
+ _D4core9exception10RangeError6__initZ@Base 12
+ _D4core9exception10RangeError6__vtblZ@Base 12
+ _D4core9exception10RangeError7__ClassZ@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCtQCrQCk@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfAyamZCQBzQBxQBq@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfC6object9ThrowableAyamZCQCrQCpQCi@Base 12
+ _D4core9exception11AssertError6__initZ@Base 12
+ _D4core9exception11AssertError6__vtblZ@Base 12
+ _D4core9exception11AssertError7__ClassZ@Base 12
+ _D4core9exception11SwitchError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCrQCpQCi@Base 12
+ _D4core9exception11SwitchError6__initZ@Base 12
+ _D4core9exception11SwitchError6__vtblZ@Base 12
+ _D4core9exception11SwitchError7__ClassZ@Base 12
+ _D4core9exception11__moduleRefZ@Base 12
+ _D4core9exception11rangeMsgPutFNaNbNiNfKAaMAxaZv@Base 12
+ _D4core9exception12__ModuleInfoZ@Base 12
+ _D4core9exception13FinalizeError6__ctorMFNaNbNiNfC8TypeInfoAyamC6object9ThrowableZCQDdQDbQCu@Base 12
+ _D4core9exception13FinalizeError6__ctorMFNaNbNiNfC8TypeInfoC6object9ThrowableAyamZCQDdQDbQCu@Base 12
+ _D4core9exception13FinalizeError6__initZ@Base 12
+ _D4core9exception13FinalizeError6__vtblZ@Base 12
+ _D4core9exception13FinalizeError7__ClassZ@Base 12
+ _D4core9exception13FinalizeError8toStringMxFNfZAya@Base 12
+ _D4core9exception13assertHandlerFNbNdNiNePFNbAyamQeZvZv@Base 12
+ _D4core9exception13assertHandlerFNbNdNiNeZPFNbAyamQeZv@Base 12
+ _D4core9exception14_assertHandlerPFNbAyamQeZv@Base 12
+ _D4core9exception15ArrayIndexError6__ctorMFNaNbNiNfmmAyamC6object9ThrowableZCQCxQCvQCo@Base 12
+ _D4core9exception15ArrayIndexError6__initZ@Base 12
+ _D4core9exception15ArrayIndexError6__vtblZ@Base 12
+ _D4core9exception15ArrayIndexError7__ClassZ@Base 12
+ _D4core9exception15ArraySliceError6__ctorMFNaNbNiNfmmmAyamC6object9ThrowableZCQCyQCwQCp@Base 12
+ _D4core9exception15ArraySliceError6__initZ@Base 12
+ _D4core9exception15ArraySliceError6__vtblZ@Base 12
+ _D4core9exception15ArraySliceError7__ClassZ@Base 12
+ _D4core9exception16OutOfMemoryError13superToStringMFNeZAya@Base 12
+ _D4core9exception16OutOfMemoryError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCwQCuQCn@Base 12
+ _D4core9exception16OutOfMemoryError6__ctorMFNaNbNiNfbAyamC6object9ThrowableZCQCxQCvQCo@Base 12
+ _D4core9exception16OutOfMemoryError6__initZ@Base 12
+ _D4core9exception16OutOfMemoryError6__vtblZ@Base 12
+ _D4core9exception16OutOfMemoryError7__ClassZ@Base 12
+ _D4core9exception16OutOfMemoryError8toStringMxFNeZAya@Base 12
+ _D4core9exception16UnicodeException6__ctorMFNaNbNiNfAyamQemC6object9ThrowableZCQCzQCxQCq@Base 12
+ _D4core9exception16UnicodeException6__initZ@Base 12
+ _D4core9exception16UnicodeException6__vtblZ@Base 12
+ _D4core9exception16UnicodeException7__ClassZ@Base 12
+ _D4core9exception17SuppressTraceInfo6__initZ@Base 12
+ _D4core9exception17SuppressTraceInfo6__vtblZ@Base 12
+ _D4core9exception17SuppressTraceInfo7__ClassZ@Base 12
+ _D4core9exception17SuppressTraceInfo7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _D4core9exception17SuppressTraceInfo7opApplyMxFMDFKxAaZiZi@Base 12
+ _D4core9exception17SuppressTraceInfo8instanceFNaNbNiNeZ2ityCQCgQCeQBx@Base 12
+ _D4core9exception17SuppressTraceInfo8instanceFNaNbNiNeZCQCcQCaQBt@Base 12
+ _D4core9exception17SuppressTraceInfo8toStringMxFZAya@Base 12
+ _D4core9exception27InvalidMemoryOperationError13superToStringMFNeZAya@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQDhQDfQCy@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__initZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__vtblZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError7__ClassZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError8toStringMxFNeZAya@Base 12
+ _D4core9exception6_storeG256v@Base 12
+ _D4core9exception9ForkError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCoQCmQCf@Base 12
+ _D4core9exception9ForkError6__initZ@Base 12
+ _D4core9exception9ForkError6__vtblZ@Base 12
+ _D4core9exception9ForkError7__ClassZ@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf10RangeErrorTAyaTmTnZQBqFKQnKmQlZ3getFNbNiZQBy@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf10RangeErrorTAyaTmTnZQBqFNaNbNiKQtKmQrZQBu@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTQeTmZQBsFKQoKQrKmZ3getFNbNiZQCb@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTQeTmZQBsFNaNbNiKQuKQxKmZQBx@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTmZQBpFKQlKmZ3getFNbNiZQBv@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTmZQBpFNaNbNiKQrKmZQBr@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCvFKQBpKQBiKQtKmZ3getFNbNiZQDj@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCvFNaNbNiKQBvKQBoKQzKmZQDf@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArrayIndexErrorTmTmTAyaTmTnZQBzFKmKmKQrKmQpZ3getFNbNiZQCl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArrayIndexErrorTmTmTAyaTmTnZQBzFNaNbNiKmKmKQxKmQvZQCh@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArraySliceErrorTmTmTmTAyaTmTnZQCbFKmKmKmKQtKmQrZ3getFNbNiZQCp@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArraySliceErrorTmTmTmTAyaTmTnZQCbFNaNbNiKmKmKmKQzKmQxZQCl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorTbZQBqFNaNbNibZQBo@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorTbZQBqFbZ3getFNbNiZQBs@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorZQBoFNaNbNiZQBl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorZQBoFZ3getFNbNiZQBp@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf27InvalidMemoryOperationErrorZQBzFNaNbNiZQBw@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf27InvalidMemoryOperationErrorZQBzFZ3getFNbNiZQCa@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf9ForkErrorTAyaTmTnZQBoFKQnKmQlZ3getFNbNiZQBw@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf9ForkErrorTAyaTmTnZQBoFNaNbNiKQtKmQrZQBs@Base 12
+ _D50TypeInfo_E4core3sys5linux10perf_event11perf_sw_ids6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc4Addr6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linux8io_uring14io_uring_probe6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linuxQk7inotify13inotify_event6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix5spawn17posix_spawnattr_t6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix7netinet3in_11sockaddr_in6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix7pthread15pthread_cleanup6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D50TypeInfo_S4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D51TypeInfo_E4core3sys5linux10perf_event12perf_type_id6__initZ@Base 12
+ _D51TypeInfo_E4core4sync7rwmutex14ReadWriteMutex6Policy6__initZ@Base 12
+ _D51TypeInfo_OS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D51TypeInfo_S4core3sys5linux8io_uring15io_uring_params6__initZ@Base 12
+ _D51TypeInfo_S4core3sys5posix7netinet3in_12sockaddr_in66__initZ@Base 12
+ _D51TypeInfo_S4core3sys5posixQk5types16pthread_rwlock_t6__initZ@Base 12
+ _D51TypeInfo_xS4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D51TypeInfo_xS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D52TypeInfo_OxS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5linux2fs22file_dedupe_range_info6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5linux3elf11Elf32_gptab9_gt_entry6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk5types17_pthread_fastlock6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk5types17pthread_barrier_t6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk6socket16sockaddr_storage6__initZ@Base 12
+ _D52TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx6__initZ@Base 12
+ _D53TypeInfo_E4core8internal2gc4impl12conservativeQw4Bins6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_cqring_offsets6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_sqring_offsets6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_uring_probe_op6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5posixQk5types18pthread_condattr_t6__initZ@Base 12
+ _D53TypeInfo_S4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D53TypeInfo_S4core8internal2gc4impl12conservativeQw4Pool6__initZ@Base 12
+ _D54TypeInfo_E4core3sys5linux10perf_event15perf_event_type6__initZ@Base 12
+ _D54TypeInfo_E4core8internal8spinlock8SpinLock10Contention6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux10perf_event15perf_event_attr6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux3elf11Elf32_gptab10_gt_header6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux5stdio21cookie_io_functions_t6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linuxQk8signalfd16signalfd_siginfo6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5posix9semaphore17_pthread_fastlock6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5posixQk5types19pthread_mutexattr_t6__initZ@Base 12
+ _D54TypeInfo_xS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D55TypeInfo_E4core3sys5linux10perf_event16perf_hw_cache_id6__initZ@Base 12
+ _D55TypeInfo_PxS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc4Addr4Name6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5posixQk5types20pthread_rwlockattr_t6__initZ@Base 12
+ _D55TypeInfo_S4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D55TypeInfo_xPS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D56TypeInfo_AxPS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D56TypeInfo_E2rt5minfo11ModuleGroup9sortCtorsMFAyaZ7OnCycle6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_branch_entry6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_event_header6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_mem_data_src6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_ns_link_info6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux8io_uring20io_uring_restriction6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5posixQk5types21pthread_barrierattr_t6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTaZQq6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTuZQq6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTwZQq6__initZ@Base 12
+ _D56TypeInfo_S4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__initZ@Base 12
+ _D56TypeInfo_S4core8internal6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D56TypeInfo_xS4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D57TypeInfo_S4core3sys5linux8io_uring21io_uring_files_update6__initZ@Base 12
+ _D58TypeInfo_E4core3sys5linux10perf_event19perf_hw_cache_op_id6__initZ@Base 12
+ _D58TypeInfo_G14PxS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D58TypeInfo_S4core3sys5linux8io_uring22io_uring_getevents_arg6__initZ@Base 12
+ _D58TypeInfo_S4core3sys5posix7pthread23_pthread_cleanup_buffer6__initZ@Base 12
+ _D58TypeInfo_xG14PS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D59TypeInfo_E4core3sys5linux10perf_event20perf_event_ioc_flags6__initZ@Base 12
+ _D59TypeInfo_E4core3sys5linux10perf_event20perf_sample_regs_abi6__initZ@Base 12
+ _D59TypeInfo_S2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result6__initZ@Base 12
+ _D59TypeInfo_S4core3sys5linux10perf_event20perf_event_mmap_page6__initZ@Base 12
+ _D59TypeInfo_S4core3sys5posix5spawn26posix_spawn_file_actions_t6__initZ@Base 12
+ _D61TypeInfo_E4core3sys5linux10perf_event22perf_callchain_context6__initZ@Base 12
+ _D61TypeInfo_E4core3sys5linux10perf_event22perf_event_read_format6__initZ@Base 12
+ _D61TypeInfo_E4core8internal2gc4impl12conservativeQw4Pool7ShiftBy6__initZ@Base 12
+ _D61TypeInfo_S2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec6__initZ@Base 12
+ _D61TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t5_rt_t6__initZ@Base 12
+ _D61TypeInfo_S4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj6__initZ@Base 12
+ _D61TypeInfo_S4core8internal6string__T17TempStringNoAllocVhi20ZQz6__initZ@Base 12
+ _D62TypeInfo_E4core3sys5linux10perf_event23perf_branch_sample_type6__initZ@Base 12
+ _D62TypeInfo_S4core8internal2gc4impl12conservativeQw12LeakDetector6__initZ@Base 12
+ _D63TypeInfo_E4core3sys5linux10perf_event24perf_event_sample_format6__initZ@Base 12
+ _D63TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t7_kill_t6__initZ@Base 12
+ _D64TypeInfo_E4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7AddType6__initZ@Base 12
+ _D64TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t8_timer_t6__initZ@Base 12
+ _D65TypeInfo_E4core3sys5linux10perf_event26perf_hw_cache_op_result_id6__initZ@Base 12
+ _D65TypeInfo_S4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy6__initZ@Base 12
+ _D65TypeInfo_S4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy6__initZ@Base 12
+ _D65TypeInfo_S4core8internal2gc4impl12conservativeQw15LargeObjectPool6__initZ@Base 12
+ _D65TypeInfo_S4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D65TypeInfo_xE4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7AddType6__initZ@Base 12
+ _D66TypeInfo_xS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D67TypeInfo_PxS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D67TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t10_sigpoll_t6__initZ@Base 12
+ _D67TypeInfo_S4core6stdcpp11type_traits__T17integral_constantTbVbi0ZQBa6__initZ@Base 12
+ _D67TypeInfo_S4core6stdcpp11type_traits__T17integral_constantTbVbi1ZQBa6__initZ@Base 12
+ _D67TypeInfo_xPS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D68TypeInfo_AxPS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D68TypeInfo_E4core3sys5linux10perf_event29perf_branch_sample_type_shift6__initZ@Base 12
+ _D68TypeInfo_E4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10IsDelegate6__initZ@Base 12
+ _D68TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t11_sigchild_t6__initZ@Base 12
+ _D68TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t11_sigfault_t6__initZ@Base 12
+ _D68TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D69TypeInfo_S4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D69TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D6Object6__initZ@Base 12
+ _D6Object6__vtblZ@Base 12
+ _D6Object7__ClassZ@Base 12
+ _D6object10ModuleInfo11xgetMembersMxFNaNbNdNiZPv@Base 12
+ _D6object10ModuleInfo12localClassesMxFNaNbNdNiNjZAC14TypeInfo_Class@Base 12
+ _D6object10ModuleInfo15importedModulesMxFNaNbNdNiNjZAyPSQCcQBy@Base 12
+ _D6object10ModuleInfo4ctorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo4dtorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo4nameMxFNaNbNdNiNjZAya@Base 12
+ _D6object10ModuleInfo5flagsMxFNaNbNdNiZk@Base 12
+ _D6object10ModuleInfo5ictorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo5indexMxFNaNbNdNiZk@Base 12
+ _D6object10ModuleInfo6__initZ@Base 12
+ _D6object10ModuleInfo6addrOfMxFNaNbNiNjiZPv@Base 12
+ _D6object10ModuleInfo7opApplyFMDFPSQBhQBdZiZi@Base 12
+ _D6object10ModuleInfo7tlsctorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo7tlsdtorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo8opAssignMFxSQBgQBcZv@Base 12
+ _D6object10ModuleInfo8unitTestMxFNaNbNdNiZPFZv@Base 12
+ _D6object10_xopEqualsFIPvIQdZb@Base 12
+ _D6object10getElementFNaNbNeNkMNgC8TypeInfoZNgQn@Base 12
+ _D6object11__moduleRefZ@Base 12
+ _D6object12__ModuleInfoZ@Base 12
+ _D6object12getArrayHashFNbNeMxC8TypeInfoMxPvxmZ15hasCustomToHashFNaNbNeMxQBrZb@Base 12
+ _D6object12getArrayHashFNbNeMxC8TypeInfoMxPvxmZm@Base 12
+ _D6object12setSameMutexFOC6ObjectOQjZv@Base 12
+ _D6object13TypeInfo_Enum11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object13TypeInfo_Enum4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object13TypeInfo_Enum4swapMxFPvQcZv@Base 12
+ _D6object13TypeInfo_Enum5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object13TypeInfo_Enum5offTiMxFZAxSQBj14OffsetTypeInfo@Base 12
+ _D6object13TypeInfo_Enum5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object13TypeInfo_Enum6equalsMxFIPvIQdZb@Base 12
+ _D6object13TypeInfo_Enum6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object13TypeInfo_Enum6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object13TypeInfo_Enum7compareMxFIPvIQdZi@Base 12
+ _D6object13TypeInfo_Enum7destroyMxFPvZv@Base 12
+ _D6object13TypeInfo_Enum7getHashMxFNbNfMxPvZm@Base 12
+ _D6object13TypeInfo_Enum8opEqualsMFC6ObjectZb@Base 12
+ _D6object13TypeInfo_Enum8postblitMxFPvZv@Base 12
+ _D6object13TypeInfo_Enum8toStringMxFNaNbNfZAya@Base 12
+ _D6object14OffsetTypeInfo11__xopEqualsMxFKxSQBqQBmZb@Base 12
+ _D6object14OffsetTypeInfo6__initZ@Base 12
+ _D6object14OffsetTypeInfo9__xtoHashFNbNeKxSQBpQBlZm@Base 12
+ _D6object14TypeInfo_Array11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object14TypeInfo_Array4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object14TypeInfo_Array4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Array5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Array5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Array6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Array6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object14TypeInfo_Array6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Array7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Array7getHashMxFNbNeMxPvZm@Base 12
+ _D6object14TypeInfo_Array8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Array8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Class10ClassFlags6__initZ@Base 12
+ _D6object14TypeInfo_Class11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object14TypeInfo_Class4findFMxAaZxCQBd@Base 12
+ _D6object14TypeInfo_Class4infoMxFNaNbNdNiNjNfZxCQBn@Base 12
+ _D6object14TypeInfo_Class5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Class5offTiMxFNaNbNdZAxSQBq14OffsetTypeInfo@Base 12
+ _D6object14TypeInfo_Class5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Class6createMxFZC6Object@Base 12
+ _D6object14TypeInfo_Class6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Class6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object14TypeInfo_Class7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Class7getHashMxFNbNeMxPvZm@Base 12
+ _D6object14TypeInfo_Class8isBaseOfMxFNaNbNiNeMxCQBnZb@Base 12
+ _D6object14TypeInfo_Class8opEqualsMxFNbNfxC8TypeInfoZb@Base 12
+ _D6object14TypeInfo_Class8toStringMxFNaNbNfZAya@Base 12
+ _D6object14TypeInfo_Class8typeinfoMxFNaNbNdNiNjNfZxCQBr@Base 12
+ _D6object14TypeInfo_Const11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object14TypeInfo_Const4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object14TypeInfo_Const4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Const5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Const5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Const6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Const6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Const7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Const7getHashMxFNbNfMxPvZm@Base 12
+ _D6object14TypeInfo_Const8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Const8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Inout8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Tuple11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object14TypeInfo_Tuple4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Tuple5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Tuple6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Tuple6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Tuple7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Tuple7destroyMxFPvZv@Base 12
+ _D6object14TypeInfo_Tuple7getHashMxFNbNfMxPvZm@Base 12
+ _D6object14TypeInfo_Tuple8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Tuple8postblitMxFPvZv@Base 12
+ _D6object14TypeInfo_Tuple8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Shared8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Struct11StructFlags6__initZ@Base 12
+ _D6object15TypeInfo_Struct11_memberFunc6__initZ@Base 12
+ _D6object15TypeInfo_Struct11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object15TypeInfo_Struct4nameMxFNbNdNeZ19demangledNamesCacheHPxvAya@Base 12
+ _D6object15TypeInfo_Struct4nameMxFNbNdNeZAya@Base 12
+ _D6object15TypeInfo_Struct5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object15TypeInfo_Struct5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Struct6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D6object15TypeInfo_Struct6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object15TypeInfo_Struct6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Struct6toHashMxFNbNfZm@Base 12
+ _D6object15TypeInfo_Struct7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D6object15TypeInfo_Struct7destroyMxFPvZv@Base 12
+ _D6object15TypeInfo_Struct7getHashMxFNaNbNeMxPvZm@Base 12
+ _D6object15TypeInfo_Struct8opEqualsMFC6ObjectZb@Base 12
+ _D6object15TypeInfo_Struct8postblitMxFPvZv@Base 12
+ _D6object15TypeInfo_Struct8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Vector11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object15TypeInfo_Vector4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object15TypeInfo_Vector4swapMxFPvQcZv@Base 12
+ _D6object15TypeInfo_Vector5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object15TypeInfo_Vector5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Vector6equalsMxFIPvIQdZb@Base 12
+ _D6object15TypeInfo_Vector6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Vector7compareMxFIPvIQdZi@Base 12
+ _D6object15TypeInfo_Vector7getHashMxFNbNfMxPvZm@Base 12
+ _D6object15TypeInfo_Vector8opEqualsMFC6ObjectZb@Base 12
+ _D6object15TypeInfo_Vector8toStringMxFNbNfZAya@Base 12
+ _D6object16TypeInfo_Pointer11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object16TypeInfo_Pointer4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object16TypeInfo_Pointer4swapMxFPvQcZv@Base 12
+ _D6object16TypeInfo_Pointer5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object16TypeInfo_Pointer5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object16TypeInfo_Pointer6equalsMxFIPvIQdZb@Base 12
+ _D6object16TypeInfo_Pointer7compareMxFIPvIQdZi@Base 12
+ _D6object16TypeInfo_Pointer7getHashMxFNbNeMxPvZm@Base 12
+ _D6object16TypeInfo_Pointer8opEqualsMFC6ObjectZb@Base 12
+ _D6object16TypeInfo_Pointer8toStringMxFNbNfZAya@Base 12
+ _D6object17TypeInfo_Delegate11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object17TypeInfo_Delegate5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object17TypeInfo_Delegate5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Delegate6equalsMxFIPvIQdZb@Base 12
+ _D6object17TypeInfo_Delegate6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object17TypeInfo_Delegate6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Delegate7compareMxFIPvIQdZi@Base 12
+ _D6object17TypeInfo_Delegate7getHashMxFNbNeMxPvZm@Base 12
+ _D6object17TypeInfo_Delegate8opEqualsMFC6ObjectZb@Base 12
+ _D6object17TypeInfo_Delegate8toStringMxFNaNbNeZAya@Base 12
+ _D6object17TypeInfo_Function11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object17TypeInfo_Function5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Function6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object17TypeInfo_Function8opEqualsMFC6ObjectZb@Base 12
+ _D6object17TypeInfo_Function8toStringMxFNaNbNeZAya@Base 12
+ _D6object18TypeInfo_Interface11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object18TypeInfo_Interface5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object18TypeInfo_Interface5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object18TypeInfo_Interface6equalsMxFIPvIQdZb@Base 12
+ _D6object18TypeInfo_Interface7compareMxFIPvIQdZi@Base 12
+ _D6object18TypeInfo_Interface7getHashMxFNbNeMxPvZm@Base 12
+ _D6object18TypeInfo_Interface8isBaseOfMxFNaNbNiNeMxC14TypeInfo_ClassZb@Base 12
+ _D6object18TypeInfo_Interface8isBaseOfMxFNaNbNiNeMxCQBrZb@Base 12
+ _D6object18TypeInfo_Interface8opEqualsMFC6ObjectZb@Base 12
+ _D6object18TypeInfo_Interface8toStringMxFNaNbNfZAya@Base 12
+ _D6object18TypeInfo_Invariant8toStringMxFNbNfZAya@Base 12
+ _D6object19__cpp_type_info_ptr6__initZ@Base 12
+ _D6object19__cpp_type_info_ptr6__vtblZ@Base 12
+ _D6object19__cpp_type_info_ptr7__ClassZ@Base 12
+ _D6object20TypeInfo_StaticArray11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object20TypeInfo_StaticArray4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object20TypeInfo_StaticArray4swapMxFPvQcZv@Base 12
+ _D6object20TypeInfo_StaticArray5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object20TypeInfo_StaticArray5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object20TypeInfo_StaticArray6equalsMxFIPvIQdZb@Base 12
+ _D6object20TypeInfo_StaticArray6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object20TypeInfo_StaticArray6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object20TypeInfo_StaticArray7compareMxFIPvIQdZi@Base 12
+ _D6object20TypeInfo_StaticArray7destroyMxFPvZv@Base 12
+ _D6object20TypeInfo_StaticArray7getHashMxFNbNeMxPvZm@Base 12
+ _D6object20TypeInfo_StaticArray8opEqualsMFC6ObjectZb@Base 12
+ _D6object20TypeInfo_StaticArray8postblitMxFPvZv@Base 12
+ _D6object20TypeInfo_StaticArray8toStringMxFNbNfZAya@Base 12
+ _D6object25TypeInfo_AssociativeArray11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object25TypeInfo_AssociativeArray4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object25TypeInfo_AssociativeArray5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object25TypeInfo_AssociativeArray5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object25TypeInfo_AssociativeArray6equalsMxFNeIPvIQdZb@Base 12
+ _D6object25TypeInfo_AssociativeArray6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object25TypeInfo_AssociativeArray7getHashMxFNbNeMxPvZm@Base 12
+ _D6object25TypeInfo_AssociativeArray8opEqualsMFC6ObjectZb@Base 12
+ _D6object25TypeInfo_AssociativeArray8toStringMxFNbNfZAya@Base 12
+ _D6object2AA6__initZ@Base 12
+ _D6object5Error6__ctorMFNaNbNiNfAyaCQBi9ThrowableZCQBxQBt@Base 12
+ _D6object5Error6__ctorMFNaNbNiNfAyaQdmCQBl9ThrowableZCQCaQBw@Base 12
+ _D6object5Error6__initZ@Base 12
+ _D6object5Error6__vtblZ@Base 12
+ _D6object5Error7__ClassZ@Base 12
+ _D6object6Object5opCmpMFCQqZi@Base 12
+ _D6object6Object6toHashMFNbNeZm@Base 12
+ _D6object6Object7Monitor11__InterfaceZ@Base 12
+ _D6object6Object7factoryFAyaZCQv@Base 12
+ _D6object6Object8opEqualsMFCQtZb@Base 12
+ _D6object6Object8toStringMFZAya@Base 12
+ _D6object7AARange6__initZ@Base 12
+ _D6object7_xopCmpFIPvIQdZb@Base 12
+ _D6object8TypeInfo4nextMNgFNaNbNdNiZNgCQBe@Base 12
+ _D6object8TypeInfo4swapMxFPvQcZv@Base 12
+ _D6object8TypeInfo5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object8TypeInfo5offTiMxFZAxSQBd14OffsetTypeInfo@Base 12
+ _D6object8TypeInfo5opCmpMFC6ObjectZi@Base 12
+ _D6object8TypeInfo5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object8TypeInfo6equalsMxFIPvIQdZb@Base 12
+ _D6object8TypeInfo6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object8TypeInfo6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object8TypeInfo6toHashMxFNbNeZm@Base 12
+ _D6object8TypeInfo7compareMxFIPvIQdZi@Base 12
+ _D6object8TypeInfo7destroyMxFPvZv@Base 12
+ _D6object8TypeInfo7getHashMxFNbNeMxPvZm@Base 12
+ _D6object8TypeInfo8opEqualsMFC6ObjectZb@Base 12
+ _D6object8TypeInfo8opEqualsMxFNbNfxCQBbZb@Base 12
+ _D6object8TypeInfo8postblitMxFPvZv@Base 12
+ _D6object8TypeInfo8toStringMxFNbNfZAya@Base 12
+ _D6object9Exception6__ctorMFNaNbNiNfAyaCQBm9ThrowableQrmZCQBx@Base 12
+ _D6object9Exception6__ctorMFNaNbNiNfAyaQdmCQBp9ThrowableZCQBx@Base 12
+ _D6object9Interface11__xopEqualsMxFKxSQBkQBgZb@Base 12
+ _D6object9Interface6__initZ@Base 12
+ _D6object9Interface9__xtoHashFNbNeKxSQBjQBfZm@Base 12
+ _D6object9Throwable13chainTogetherFNaNbNiNkMCQBrQBnNkMQkZQn@Base 12
+ _D6object9Throwable4nextMFNaNbNdNiNlNfCQBlQBhZv@Base 12
+ _D6object9Throwable4nextMNgFNaNbNdNiNjNfZNgCQBqQBm@Base 12
+ _D6object9Throwable6__ctorMFNaNbNiNfAyaCQBmQBiZQi@Base 12
+ _D6object9Throwable6__ctorMFNaNbNiNfAyaQdmCQBpQBlZQi@Base 12
+ _D6object9Throwable6__dtorMFNbNeZv@Base 12
+ _D6object9Throwable6__initZ@Base 12
+ _D6object9Throwable6__vtblZ@Base 12
+ _D6object9Throwable7__ClassZ@Base 12
+ _D6object9Throwable7messageMxFNbNfZAxa@Base 12
+ _D6object9Throwable7opApplyMFMDFCQBfQBbZiZi@Base 12
+ _D6object9Throwable8refcountMFNaNbNcNiNjZk@Base 12
+ _D6object9Throwable8toStringMFZAya@Base 12
+ _D6object9Throwable8toStringMxFMDFIAaZvZv@Base 12
+ _D6object9Throwable9TraceInfo11__InterfaceZ@Base 12
+ _D6object__T10RTInfoImplVAmA2i104i2048ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i11274ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i3ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i721ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i16424ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i21610ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i4ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i12ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i512ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i12i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i152i347816ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i168i4244ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i3ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i176i3931280ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i177i3931280ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i204i1448ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i216i8011774ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i232i1448ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i3ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i6ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i7ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i256i8388608ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i272i2158144171ZQBhyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i11ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i12ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i13ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i14ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i15ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i7ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i8ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i9ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i15ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i18ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i20ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i22ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i24ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i30ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i44i12ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i24ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i31ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i32ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i42ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i44ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i56ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i59ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i63ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i123ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i21ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i40ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i64ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i84ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i134ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i9ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i256ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i76i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i248ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i516ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i1448ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i8i1ZQwyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i1023ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i1154ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i3496ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA3i968i268435462i0ZQBiyG3m@Base 12
+ _D6object__T10RTInfoImplVAmA4i1064i549755827528i0i16ZQBpyG4m@Base 12
+ _D6object__T10RTInfoImplVAmA4i1152i144107491482206208i565149010231808i0ZQCiyG4m@Base 12
+ _D6object__T3dupTaZQhFNaNbNdNfAxaZAa@Base 12
+ _D6object__T4_dupTaTyaZQlFNaNbNeMAaZAya@Base 12
+ _D6object__T4_dupTxaTaZQlFNaNbNeMAxaZAa@Base 12
+ _D6object__T4idupTaZQiFNaNbNdNfAaZAya@Base 12
+ _D6object__T4keysHTHC4core6thread8osthread6ThreadQBdTQBhTQBlZQBxFNaNbNdNfQCcZAQCg@Base 12
+ _D6object__T7destroyVbi1TC4core2gc11gcinterface2GCZQBnFNbQBgZv@Base 12
+ _D6object__T7destroyVbi1TC6ObjectZQwFNbQoZv@Base 12
+ _D6object__T7destroyVbi1TS3gcc8sections3elf9ThreadDSOZQBqFNaNbNiNfKQBqZv@Base 12
+ _D6object__T7destroyVbi1TS4core2gc11gcinterface4RootZQBpFNaNbNiNfKQBpZv@Base 12
+ _D6object__T7destroyVbi1TS4core2gc11gcinterface5RangeZQBqFNaNbNiNfKQBqZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDoFNaNbNiNfKQDoZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDqFNaNbNiNfKQDqZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTPySQCt10ModuleInfoTiZQBe4NodeZQDkFNaNbNiNfKQDkZv@Base 12
+ _D6object__T7reserveTuZQlFNaNbNeKAumZm@Base 12
+ _D6object__T8_dupCtfeTaTyaZQpFNaNbNfMAaZAya@Base 12
+ _D6object__T8_dupCtfeTxaTaZQpFNaNbNfMAxaZAa@Base 12
+ _D6object__T8opEqualsTC14TypeInfo_ClassTQsZQBfFNbNfQBdQBgZb@Base 12
+ _D6object__T8opEqualsTC6ObjectTQjZQwFQpQrZb@Base 12
+ _D6object__T8opEqualsTC8TypeInfoTxCQmZQBaFNbNfQyxQpZb@Base 12
+ _D6object__T8opEqualsTxC14TypeInfo_ClassTxQtZQBhFNbNfxQBfxQBjZb@Base 12
+ _D6object__T8opEqualsTxC15TypeInfo_StructTxQuZQBiFxQBcxQBgZb@Base 12
+ _D6object__T8opEqualsTxC6ObjectTxQkZQyFxQrxQuZb@Base 12
+ _D6object__T8opEqualsTxC8TypeInfoTxQmZQBaFNbNfxQyxQBbZb@Base 12
+ _D6object__T8opEqualsTxCQw9ThrowableTxQpZQBdFxQxxQBaZb@Base 12
+ _D70TypeInfo_G14PxS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D70TypeInfo_PxS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D70TypeInfo_xG14PS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D70TypeInfo_xPS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D70TypeInfo_xS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D72TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi0ZQp6__initZ@Base 12
+ _D72TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi1ZQp6__initZ@Base 12
+ _D74TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D75TypeInfo_S2rt5minfo11ModuleGroup9sortCtorsMFAyaZ8findDepsMFmPmZ10stackFrame6__initZ@Base 12
+ _D75TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D77TypeInfo_S4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D77TypeInfo_S4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D77TypeInfo_S4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D78TypeInfo_S4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D78TypeInfo_xS4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D79TypeInfo_S4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D79TypeInfo_S4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D79TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D80TypeInfo_S4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D80TypeInfo_S4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D80TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D80TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D81TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D82TypeInfo_S4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__initZ@Base 12
+ _D83TypeInfo_AS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D83TypeInfo_xS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D84TypeInfo_AxS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D84TypeInfo_S4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D84TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh4Node6__initZ@Base 12
+ _D84TypeInfo_S4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__initZ@Base 12
+ _D84TypeInfo_xAS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D85TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D86TypeInfo_S4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D86TypeInfo_S4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D86TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_PxS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_S4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_S4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_xPS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D88TypeInfo_xS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D89TypeInfo_PxS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D89TypeInfo_S4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D89TypeInfo_S4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4Node6__initZ@Base 12
+ _D89TypeInfo_xPS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D8TypeInfo6__initZ@Base 12
+ _D8TypeInfo6__vtblZ@Base 12
+ _D8TypeInfo7__ClassZ@Base 12
+ _D92TypeInfo_S4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D93TypeInfo_S4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__initZ@Base 12
+ _D93TypeInfo_xS4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D95TypeInfo_S3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ2DG6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTaTSQBzQBxQBt__T11char_traitsTaZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTuTSQBzQBxQBt__T11char_traitsTuZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTwTSQBzQBxQBt__T11char_traitsTwZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D9Exception6__initZ@Base 12
+ _D9Exception6__vtblZ@Base 12
+ _D9Exception7__ClassZ@Base 12
+ _D9invariant11__moduleRefZ@Base 12
+ _D9invariant12__ModuleInfoZ@Base 12
+ _D9invariant12_d_invariantFC6ObjectZv@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKxAaZiZi@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace8toStringMxFZAya@Base 12
+ _DTi16_D4core4sync5mutex5Mutex4lockMFNeZv@Base 12
+ _DTi16_D4core4sync5mutex5Mutex6unlockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMFNeZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC12profileStatsMFNbNiNeZSQDa6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC5queryMFNbPvZSQCq6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC5statsMFNbNiNfZSQCs6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkMxC8TypeInfoZSQDd6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8rootIterMFNdNiZDFMDFNbKSQDbQCq11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC9rangeIterMFNdNiZDFMDFNbKSQDcQCr11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC12profileStatsMFNbNiNfZSQCk6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC5queryMFNbPvZSQCa6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC5statsMFNbNiNfZSQCc6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6callocMFNbmkMxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6extendMFNbPvmmMxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6mallocMFNbmkMxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6qallocMFNbmkMxC8TypeInfoZSQCn6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7reallocMFNbPvmkMxC8TypeInfoZQr@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8rootIterMFNdNiNjZDFMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC9rangeIterMFNdNiNjZDFMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC12profileStatsMFNbNiNfZSQCm6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC5queryMFNbPvZSQCc6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC5statsMFNbNiNfZSQCe6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6qallocMFNbmkMxC8TypeInfoZSQCp6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8rootIterMFNdNiNjZDFMDFNbKSQCpQCe11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC9rangeIterMFNdNiNjZDFMDFNbKSQCqQCf11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo7opApplyMxFMDFKxAaZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo8toStringMxFZAya@Base 12
+ _ZNKSt10bad_typeid4whatEv@Base 12
+ _ZNKSt13bad_exception4whatEv@Base 12
+ _ZNKSt8bad_cast4whatEv@Base 12
+ _ZNKSt9exception4whatEv@Base 12
+ _ZNKSt9type_info4nameEv@Base 12
+ _ZNKSt9type_info6beforeEPKS_@Base 12
+ _ZNSt9bad_allocC1Ev@Base 12
+ _ZNSt9exceptionD1Ev@Base 12
+ _ZNSt9type_infoD1Ev@Base 12
+ __CPUELT@Base 12
+ __CPUMASK@Base 12
+ __CPU_ALLOC@Base 12
+ __CPU_ALLOC_SIZE@Base 12
+ __CPU_COUNT_S@Base 12
+ __CPU_FREE@Base 12
+ __CPU_ISSET_S@Base 12
+ __CPU_SET_S@Base 12
+ __atomic_add_fetch_16@Base 12
+ __atomic_add_fetch_1@Base 12
+ __atomic_add_fetch_2@Base 12
+ __atomic_add_fetch_4@Base 12
+ __atomic_add_fetch_8@Base 12
+ __atomic_and_fetch_16@Base 12
+ __atomic_and_fetch_1@Base 12
+ __atomic_and_fetch_2@Base 12
+ __atomic_and_fetch_4@Base 12
+ __atomic_and_fetch_8@Base 12
+ __atomic_compare_exchange@Base 12
+ __atomic_compare_exchange_16@Base 12
+ __atomic_compare_exchange_1@Base 12
+ __atomic_compare_exchange_2@Base 12
+ __atomic_compare_exchange_4@Base 12
+ __atomic_compare_exchange_8@Base 12
+ __atomic_exchange@Base 12
+ __atomic_exchange_16@Base 12
+ __atomic_exchange_1@Base 12
+ __atomic_exchange_2@Base 12
+ __atomic_exchange_4@Base 12
+ __atomic_exchange_8@Base 12
+ __atomic_feraiseexcept@Base 12
+ __atomic_fetch_add_16@Base 12
+ __atomic_fetch_add_1@Base 12
+ __atomic_fetch_add_2@Base 12
+ __atomic_fetch_add_4@Base 12
+ __atomic_fetch_add_8@Base 12
+ __atomic_fetch_and_16@Base 12
+ __atomic_fetch_and_1@Base 12
+ __atomic_fetch_and_2@Base 12
+ __atomic_fetch_and_4@Base 12
+ __atomic_fetch_and_8@Base 12
+ __atomic_fetch_nand_16@Base 12
+ __atomic_fetch_nand_1@Base 12
+ __atomic_fetch_nand_2@Base 12
+ __atomic_fetch_nand_4@Base 12
+ __atomic_fetch_nand_8@Base 12
+ __atomic_fetch_or_16@Base 12
+ __atomic_fetch_or_1@Base 12
+ __atomic_fetch_or_2@Base 12
+ __atomic_fetch_or_4@Base 12
+ __atomic_fetch_or_8@Base 12
+ __atomic_fetch_sub_16@Base 12
+ __atomic_fetch_sub_1@Base 12
+ __atomic_fetch_sub_2@Base 12
+ __atomic_fetch_sub_4@Base 12
+ __atomic_fetch_sub_8@Base 12
+ __atomic_fetch_xor_16@Base 12
+ __atomic_fetch_xor_1@Base 12
+ __atomic_fetch_xor_2@Base 12
+ __atomic_fetch_xor_4@Base 12
+ __atomic_fetch_xor_8@Base 12
+ __atomic_is_lock_free@Base 12
+ __atomic_load@Base 12
+ __atomic_load_16@Base 12
+ __atomic_load_1@Base 12
+ __atomic_load_2@Base 12
+ __atomic_load_4@Base 12
+ __atomic_load_8@Base 12
+ __atomic_nand_fetch_16@Base 12
+ __atomic_nand_fetch_1@Base 12
+ __atomic_nand_fetch_2@Base 12
+ __atomic_nand_fetch_4@Base 12
+ __atomic_nand_fetch_8@Base 12
+ __atomic_or_fetch_16@Base 12
+ __atomic_or_fetch_1@Base 12
+ __atomic_or_fetch_2@Base 12
+ __atomic_or_fetch_4@Base 12
+ __atomic_or_fetch_8@Base 12
+ __atomic_store@Base 12
+ __atomic_store_16@Base 12
+ __atomic_store_1@Base 12
+ __atomic_store_2@Base 12
+ __atomic_store_4@Base 12
+ __atomic_store_8@Base 12
+ __atomic_sub_fetch_16@Base 12
+ __atomic_sub_fetch_1@Base 12
+ __atomic_sub_fetch_2@Base 12
+ __atomic_sub_fetch_4@Base 12
+ __atomic_sub_fetch_8@Base 12
+ __atomic_test_and_set_16@Base 12
+ __atomic_test_and_set_1@Base 12
+ __atomic_test_and_set_2@Base 12
+ __atomic_test_and_set_4@Base 12
+ __atomic_test_and_set_8@Base 12
+ __atomic_xor_fetch_16@Base 12
+ __atomic_xor_fetch_1@Base 12
+ __atomic_xor_fetch_2@Base 12
+ __atomic_xor_fetch_4@Base 12
+ __atomic_xor_fetch_8@Base 12
+ __gdc_begin_catch@Base 12
+ __gdc_personality_v0@Base 12
+ _aApplyRcd1@Base 12
+ _aApplyRcd2@Base 12
+ _aApplyRcw1@Base 12
+ _aApplyRcw2@Base 12
+ _aApplyRdc1@Base 12
+ _aApplyRdc2@Base 12
+ _aApplyRdw1@Base 12
+ _aApplyRdw2@Base 12
+ _aApplyRwc1@Base 12
+ _aApplyRwc2@Base 12
+ _aApplyRwd1@Base 12
+ _aApplyRwd2@Base 12
+ _aApplycd1@Base 12
+ _aApplycd2@Base 12
+ _aApplycw1@Base 12
+ _aApplycw2@Base 12
+ _aApplydc1@Base 12
+ _aApplydc2@Base 12
+ _aApplydw1@Base 12
+ _aApplydw2@Base 12
+ _aApplywc1@Base 12
+ _aApplywc2@Base 12
+ _aApplywd1@Base 12
+ _aApplywd2@Base 12
+ _aaApply2@Base 12
+ _aaApply@Base 12
+ _aaClear@Base 12
+ _aaDelX@Base 12
+ _aaEqual@Base 12
+ _aaGetHash@Base 12
+ _aaGetRvalueX@Base 12
+ _aaGetX@Base 12
+ _aaGetY@Base 12
+ _aaInX@Base 12
+ _aaKeys@Base 12
+ _aaLen@Base 12
+ _aaRange@Base 12
+ _aaRangeEmpty@Base 12
+ _aaRangeFrontKey@Base 12
+ _aaRangeFrontValue@Base 12
+ _aaRangePopFront@Base 12
+ _aaRehash@Base 12
+ _aaValues@Base 12
+ _aaVersion@Base 12
+ _adEq2@Base 12
+ _adSort@Base 12
+ _d_allocmemory@Base 12
+ _d_arrayappendT@Base 12
+ _d_arrayappendcTX@Base 12
+ _d_arrayappendcd@Base 12
+ _d_arrayappendwd@Base 12
+ _d_arrayassign@Base 12
+ _d_arrayassign_l@Base 12
+ _d_arrayassign_r@Base 12
+ _d_arraybounds@Base 12
+ _d_arraybounds_index@Base 12
+ _d_arraybounds_indexp@Base 12
+ _d_arraybounds_slice@Base 12
+ _d_arraybounds_slicep@Base 12
+ _d_arrayboundsp@Base 12
+ _d_arraycatT@Base 12
+ _d_arraycatnTX@Base 12
+ _d_arraycopy@Base 12
+ _d_arrayctor@Base 12
+ _d_arrayliteralTX@Base 12
+ _d_arraysetassign@Base 12
+ _d_arraysetcapacity@Base 12
+ _d_arraysetctor@Base 12
+ _d_arraysetlengthT@Base 12
+ _d_arraysetlengthiT@Base 12
+ _d_arrayshrinkfit@Base 12
+ _d_assert@Base 12
+ _d_assert_msg@Base 12
+ _d_assertp@Base 12
+ _d_assocarrayliteralTX@Base 12
+ _d_callfinalizer@Base 12
+ _d_callinterfacefinalizer@Base 12
+ _d_createTrace@Base 12
+ _d_critical_init@Base 12
+ _d_critical_term@Base 12
+ _d_criticalenter2@Base 12
+ _d_criticalenter@Base 12
+ _d_criticalexit@Base 12
+ _d_delThrowable@Base 12
+ _d_delarray_t@Base 12
+ _d_delclass@Base 12
+ _d_delinterface@Base 12
+ _d_delmemory@Base 12
+ _d_delstruct@Base 12
+ _d_dso_registry@Base 12
+ _d_dynamic_cast@Base 12
+ _d_eh_swapContext@Base 12
+ _d_initMonoTime@Base 12
+ _d_interface_cast@Base 12
+ _d_isbaseof2@Base 12
+ _d_isbaseof@Base 12
+ _d_monitor_staticctor@Base 12
+ _d_monitor_staticdtor@Base 12
+ _d_monitordelete@Base 12
+ _d_monitordelete_nogc@Base 12
+ _d_monitorenter@Base 12
+ _d_monitorexit@Base 12
+ _d_newThrowable@Base 12
+ _d_newarrayT@Base 12
+ _d_newarrayU@Base 12
+ _d_newarrayiT@Base 12
+ _d_newarraymTX@Base 12
+ _d_newarraymiTX@Base 12
+ _d_newclass@Base 12
+ _d_newitemT@Base 12
+ _d_newitemU@Base 12
+ _d_newitemiT@Base 12
+ _d_print_throwable@Base 12
+ _d_register_conservative_gc@Base 12
+ _d_register_manual_gc@Base 12
+ _d_register_precise_gc@Base 12
+ _d_run_main2@Base 12
+ _d_run_main@Base 12
+ _d_setSameMutex@Base 12
+ _d_throw@Base 12
+ _d_toObject@Base 12
+ _d_traceContext@Base 12
+ _d_unittest@Base 12
+ _d_unittest_msg@Base 12
+ _d_unittestp@Base 12
+ atomic_flag_clear@Base 12
+ atomic_flag_clear_explicit@Base 12
+ atomic_flag_test_and_set@Base 12
+ atomic_flag_test_and_set_explicit@Base 12
+ atomic_signal_fence@Base 12
+ atomic_thread_fence@Base 12
+ backtrace_alloc@Base 12
+ backtrace_close@Base 12
+ backtrace_create_state@Base 12
+ backtrace_dwarf_add@Base 12
+ backtrace_free@Base 12
+ backtrace_full@Base 12
+ backtrace_get_view@Base 12
+ backtrace_initialize@Base 12
+ backtrace_open@Base 12
+ backtrace_pcinfo@Base 12
+ backtrace_print@Base 12
+ backtrace_qsort@Base 12
+ backtrace_release_view@Base 12
+ backtrace_simple@Base 12
+ backtrace_syminfo@Base 12
+ backtrace_syminfo_to_full_callback@Base 12
+ backtrace_syminfo_to_full_error_callback@Base 12
+ backtrace_uncompress_lzma@Base 12
+ backtrace_uncompress_zdebug@Base 12
+ backtrace_vector_finish@Base 12
+ backtrace_vector_grow@Base 12
+ backtrace_vector_release@Base 12
+ cimag@Base 12
+ cimagf@Base 12
+ cimagl@Base 12
+ creald@Base 12
+ crealf@Base 12
+ creall@Base 12
+ fakePureErrnoImpl@Base 12
+ fakePureReprintReal@Base 12
+ fiber_entryPoint@Base 12
+ fiber_switchContext@Base 12
+ gc_addRange@Base 12
+ gc_addRoot@Base 12
+ gc_addrOf@Base 12
+ gc_allocatedInCurrentThread@Base 12
+ gc_calloc@Base 12
+ gc_clrAttr@Base 12
+ gc_clrProxy@Base 12
+ gc_collect@Base 12
+ gc_disable@Base 12
+ gc_enable@Base 12
+ gc_extend@Base 12
+ gc_free@Base 12
+ gc_getAttr@Base 12
+ gc_getProxy@Base 12
+ gc_inFinalizer@Base 12
+ gc_init@Base 12
+ gc_init_nothrow@Base 12
+ gc_malloc@Base 12
+ gc_minimize@Base 12
+ gc_profileStats@Base 12
+ gc_qalloc@Base 12
+ gc_query@Base 12
+ gc_realloc@Base 12
+ gc_removeRange@Base 12
+ gc_removeRoot@Base 12
+ gc_reserve@Base 12
+ gc_runFinalizers@Base 12
+ gc_setAttr@Base 12
+ gc_setProxy@Base 12
+ gc_sizeOf@Base 12
+ gc_stats@Base 12
+ gc_term@Base 12
+ getErrno@Base 12
+ libat_lock_n@Base 12
+ libat_unlock_n@Base 12
+ lifetime_init@Base 12
+ onArrayIndexError@Base 12
+ onArraySliceError@Base 12
+ onAssertError@Base 12
+ onAssertErrorMsg@Base 12
+ onFinalizeError@Base 12
+ onForkError@Base 12
+ onInvalidMemoryOperationError@Base 12
+ onOutOfMemoryError@Base 12
+ onOutOfMemoryErrorNoGC@Base 12
+ onRangeError@Base 12
+ onUnicodeError@Base 12
+ onUnittestErrorMsg@Base 12
+ pcinfoCallback@Base 12
+ pcinfoErrorCallback@Base 12
+ perf_event_open@Base 12
+ profilegc_setlogfilename@Base 12
+ register_default_gcs@Base 12
+ rt_args@Base 12
+ rt_attachDisposeEvent@Base 12
+ rt_cArgs@Base 12
+ rt_cmdline_enabled@Base 12
+ rt_detachDisposeEvent@Base 12
+ rt_envvars_enabled@Base 12
+ rt_finalize2@Base 12
+ rt_finalize@Base 12
+ rt_finalizeFromGC@Base 12
+ rt_getCollectHandler@Base 12
+ rt_getTraceHandler@Base 12
+ rt_hasFinalizerInSegment@Base 12
+ rt_init@Base 12
+ rt_loadLibrary@Base 12
+ rt_moduleCtor@Base 12
+ rt_moduleDtor@Base 12
+ rt_moduleTlsCtor@Base 12
+ rt_moduleTlsDtor@Base 12
+ rt_options@Base 12
+ rt_setCollectHandler@Base 12
+ rt_setTraceHandler@Base 12
+ rt_term@Base 12
+ rt_trapExceptions@Base 12
+ rt_unloadLibrary@Base 12
+ runModuleUnitTests@Base 12
+ setErrno@Base 12
+ simpleCallback@Base 12
+ simpleErrorCallback@Base 12
+ syminfoCallback2@Base 12
+ syminfoCallback@Base 12
+ thread_attachThis@Base 12
+ thread_detachByAddr@Base 12
+ thread_detachInstance@Base 12
+ thread_detachThis@Base 12
+ thread_enterCriticalRegion@Base 12
+ thread_entryPoint@Base 12
+ thread_exitCriticalRegion@Base 12
+ thread_inCriticalRegion@Base 12
+ thread_init@Base 12
+ thread_isMainThread@Base 12
+ thread_joinAll@Base 12
+ thread_processGCMarks@Base 12
+ thread_resumeAll@Base 12
+ thread_resumeHandler@Base 12
+ thread_scanAll@Base 12
+ thread_scanAllType@Base 12
+ thread_setGCSignals@Base 12
+ thread_setThis@Base 12
+ thread_stackBottom@Base 12
+ thread_stackTop@Base 12
+ thread_suspendAll@Base 12
+ thread_suspendHandler@Base 12
+ thread_term@Base 12
+ tipc_addr@Base 12
+ tipc_cluster@Base 12
+ tipc_node@Base 12
+ tipc_zone@Base 12
+libgphobos.so.3 libgphobos3 #MINVER#
+ CPU_ALLOC@Base 12
+ CPU_ALLOC_SIZE@Base 12
+ CPU_COUNT@Base 12
+ CPU_COUNT_S@Base 12
+ CPU_FREE@Base 12
+ CPU_ISSET@Base 12
+ CPU_SET@Base 12
+ LOG_MASK@Base 12
+ LOG_UPTO@Base 12
+ SIGRTMAX@Base 12
+ SIGRTMIN@Base 12
+ S_TYPEISMQ@Base 12
+ S_TYPEISSEM@Base 12
+ S_TYPEISSHM@Base 12
+ _D101TypeInfo_E4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7AddType6__initZ@Base 12
+ _D101TypeInfo_S3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi6__initZ@Base 12
+ _D101TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu6__initZ@Base 12
+ _D101TypeInfo_S4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D101TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFKQBqKQBuZ1S6__initZ@Base 12
+ _D102TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__initZ@Base 12
+ _D102TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__initZ@Base 12
+ _D102TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1S6__initZ@Base 12
+ _D102TypeInfo_xE4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7AddType6__initZ@Base 12
+ _D103TypeInfo_S3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb6__initZ@Base 12
+ _D103TypeInfo_S4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D103TypeInfo_S4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D103TypeInfo_S4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D105TypeInfo_E4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10IsDelegate6__initZ@Base 12
+ _D105TypeInfo_S3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator6__initZ@Base 12
+ _D105TypeInfo_S3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy6__initZ@Base 12
+ _D105TypeInfo_S3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq6__initZ@Base 12
+ _D107TypeInfo_S3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5State6__initZ@Base 12
+ _D107TypeInfo_S3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5Trace6__initZ@Base 12
+ _D109TypeInfo_S3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp6__initZ@Base 12
+ _D10TypeInfo_a6__initZ@Base 12
+ _D10TypeInfo_a6__vtblZ@Base 12
+ _D10TypeInfo_a7__ClassZ@Base 12
+ _D10TypeInfo_b6__initZ@Base 12
+ _D10TypeInfo_b6__vtblZ@Base 12
+ _D10TypeInfo_b7__ClassZ@Base 12
+ _D10TypeInfo_c6__initZ@Base 12
+ _D10TypeInfo_c6__vtblZ@Base 12
+ _D10TypeInfo_c7__ClassZ@Base 12
+ _D10TypeInfo_d6__initZ@Base 12
+ _D10TypeInfo_d6__vtblZ@Base 12
+ _D10TypeInfo_d7__ClassZ@Base 12
+ _D10TypeInfo_e6__initZ@Base 12
+ _D10TypeInfo_e6__vtblZ@Base 12
+ _D10TypeInfo_e7__ClassZ@Base 12
+ _D10TypeInfo_f6__initZ@Base 12
+ _D10TypeInfo_f6__vtblZ@Base 12
+ _D10TypeInfo_f7__ClassZ@Base 12
+ _D10TypeInfo_g6__initZ@Base 12
+ _D10TypeInfo_g6__vtblZ@Base 12
+ _D10TypeInfo_g7__ClassZ@Base 12
+ _D10TypeInfo_h6__initZ@Base 12
+ _D10TypeInfo_h6__vtblZ@Base 12
+ _D10TypeInfo_h7__ClassZ@Base 12
+ _D10TypeInfo_i6__initZ@Base 12
+ _D10TypeInfo_i6__vtblZ@Base 12
+ _D10TypeInfo_i7__ClassZ@Base 12
+ _D10TypeInfo_j6__initZ@Base 12
+ _D10TypeInfo_j6__vtblZ@Base 12
+ _D10TypeInfo_j7__ClassZ@Base 12
+ _D10TypeInfo_k6__initZ@Base 12
+ _D10TypeInfo_k6__vtblZ@Base 12
+ _D10TypeInfo_k7__ClassZ@Base 12
+ _D10TypeInfo_l6__initZ@Base 12
+ _D10TypeInfo_l6__vtblZ@Base 12
+ _D10TypeInfo_l7__ClassZ@Base 12
+ _D10TypeInfo_m6__initZ@Base 12
+ _D10TypeInfo_m6__vtblZ@Base 12
+ _D10TypeInfo_m7__ClassZ@Base 12
+ _D10TypeInfo_n6__initZ@Base 12
+ _D10TypeInfo_n6__vtblZ@Base 12
+ _D10TypeInfo_n7__ClassZ@Base 12
+ _D10TypeInfo_o6__initZ@Base 12
+ _D10TypeInfo_o6__vtblZ@Base 12
+ _D10TypeInfo_o7__ClassZ@Base 12
+ _D10TypeInfo_p6__initZ@Base 12
+ _D10TypeInfo_p6__vtblZ@Base 12
+ _D10TypeInfo_p7__ClassZ@Base 12
+ _D10TypeInfo_q6__initZ@Base 12
+ _D10TypeInfo_q6__vtblZ@Base 12
+ _D10TypeInfo_q7__ClassZ@Base 12
+ _D10TypeInfo_r6__initZ@Base 12
+ _D10TypeInfo_r6__vtblZ@Base 12
+ _D10TypeInfo_r7__ClassZ@Base 12
+ _D10TypeInfo_s6__initZ@Base 12
+ _D10TypeInfo_s6__vtblZ@Base 12
+ _D10TypeInfo_s7__ClassZ@Base 12
+ _D10TypeInfo_t6__initZ@Base 12
+ _D10TypeInfo_t6__vtblZ@Base 12
+ _D10TypeInfo_t7__ClassZ@Base 12
+ _D10TypeInfo_u6__initZ@Base 12
+ _D10TypeInfo_u6__vtblZ@Base 12
+ _D10TypeInfo_u7__ClassZ@Base 12
+ _D10TypeInfo_v6__initZ@Base 12
+ _D10TypeInfo_v6__vtblZ@Base 12
+ _D10TypeInfo_v7__ClassZ@Base 12
+ _D10TypeInfo_w6__initZ@Base 12
+ _D10TypeInfo_w6__vtblZ@Base 12
+ _D10TypeInfo_w7__ClassZ@Base 12
+ _D110TypeInfo_S3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi6__initZ@Base 12
+ _D110TypeInfo_S3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf6__initZ@Base 12
+ _D111TypeInfo_S3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator6__initZ@Base 12
+ _D111TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj6__initZ@Base 12
+ _D111TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj6__initZ@Base 12
+ _D112TypeInfo_OS3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator6__initZ@Base 12
+ _D112TypeInfo_S3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf6__initZ@Base 12
+ _D113TypeInfo_S4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D114TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D114TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D114TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D115TypeInfo_S4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D115TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D115TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D115TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D115TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D115TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D116TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj6__initZ@Base 12
+ _D116TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D116TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D117TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__initZ@Base 12
+ _D117TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__initZ@Base 12
+ _D118TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore6__initZ@Base 12
+ _D118TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__initZ@Base 12
+ _D118TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__initZ@Base 12
+ _D119TypeInfo_E3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8Operator6__initZ@Base 12
+ _D119TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore6__initZ@Base 12
+ _D119TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore6__initZ@Base 12
+ _D11TypeInfo_Aa6__initZ@Base 12
+ _D11TypeInfo_Aa6__vtblZ@Base 12
+ _D11TypeInfo_Aa7__ClassZ@Base 12
+ _D11TypeInfo_Ab6__initZ@Base 12
+ _D11TypeInfo_Ab6__vtblZ@Base 12
+ _D11TypeInfo_Ab7__ClassZ@Base 12
+ _D11TypeInfo_Ac6__initZ@Base 12
+ _D11TypeInfo_Ac6__vtblZ@Base 12
+ _D11TypeInfo_Ac7__ClassZ@Base 12
+ _D11TypeInfo_Ad6__initZ@Base 12
+ _D11TypeInfo_Ad6__vtblZ@Base 12
+ _D11TypeInfo_Ad7__ClassZ@Base 12
+ _D11TypeInfo_Ae6__initZ@Base 12
+ _D11TypeInfo_Ae6__vtblZ@Base 12
+ _D11TypeInfo_Ae7__ClassZ@Base 12
+ _D11TypeInfo_Af6__initZ@Base 12
+ _D11TypeInfo_Af6__vtblZ@Base 12
+ _D11TypeInfo_Af7__ClassZ@Base 12
+ _D11TypeInfo_Ag6__initZ@Base 12
+ _D11TypeInfo_Ag6__vtblZ@Base 12
+ _D11TypeInfo_Ag7__ClassZ@Base 12
+ _D11TypeInfo_Ah6__initZ@Base 12
+ _D11TypeInfo_Ah6__vtblZ@Base 12
+ _D11TypeInfo_Ah7__ClassZ@Base 12
+ _D11TypeInfo_Ai6__initZ@Base 12
+ _D11TypeInfo_Ai6__vtblZ@Base 12
+ _D11TypeInfo_Ai7__ClassZ@Base 12
+ _D11TypeInfo_Aj6__initZ@Base 12
+ _D11TypeInfo_Aj6__vtblZ@Base 12
+ _D11TypeInfo_Aj7__ClassZ@Base 12
+ _D11TypeInfo_Ak6__initZ@Base 12
+ _D11TypeInfo_Ak6__vtblZ@Base 12
+ _D11TypeInfo_Ak7__ClassZ@Base 12
+ _D11TypeInfo_Al6__initZ@Base 12
+ _D11TypeInfo_Al6__vtblZ@Base 12
+ _D11TypeInfo_Al7__ClassZ@Base 12
+ _D11TypeInfo_Am6__initZ@Base 12
+ _D11TypeInfo_Am6__vtblZ@Base 12
+ _D11TypeInfo_Am7__ClassZ@Base 12
+ _D11TypeInfo_Ao6__initZ@Base 12
+ _D11TypeInfo_Ao6__vtblZ@Base 12
+ _D11TypeInfo_Ao7__ClassZ@Base 12
+ _D11TypeInfo_Ap6__initZ@Base 12
+ _D11TypeInfo_Ap6__vtblZ@Base 12
+ _D11TypeInfo_Ap7__ClassZ@Base 12
+ _D11TypeInfo_Aq6__initZ@Base 12
+ _D11TypeInfo_Aq6__vtblZ@Base 12
+ _D11TypeInfo_Aq7__ClassZ@Base 12
+ _D11TypeInfo_Ar6__initZ@Base 12
+ _D11TypeInfo_Ar6__vtblZ@Base 12
+ _D11TypeInfo_Ar7__ClassZ@Base 12
+ _D11TypeInfo_As6__initZ@Base 12
+ _D11TypeInfo_As6__vtblZ@Base 12
+ _D11TypeInfo_As7__ClassZ@Base 12
+ _D11TypeInfo_At6__initZ@Base 12
+ _D11TypeInfo_At6__vtblZ@Base 12
+ _D11TypeInfo_At7__ClassZ@Base 12
+ _D11TypeInfo_Au6__initZ@Base 12
+ _D11TypeInfo_Au6__vtblZ@Base 12
+ _D11TypeInfo_Au7__ClassZ@Base 12
+ _D11TypeInfo_Av6__initZ@Base 12
+ _D11TypeInfo_Av6__vtblZ@Base 12
+ _D11TypeInfo_Av7__ClassZ@Base 12
+ _D11TypeInfo_Aw6__initZ@Base 12
+ _D11TypeInfo_Aw6__vtblZ@Base 12
+ _D11TypeInfo_Aw7__ClassZ@Base 12
+ _D11TypeInfo_Oa6__initZ@Base 12
+ _D11TypeInfo_Ou6__initZ@Base 12
+ _D11TypeInfo_xa6__initZ@Base 12
+ _D11TypeInfo_xb6__initZ@Base 12
+ _D11TypeInfo_xd6__initZ@Base 12
+ _D11TypeInfo_xe6__initZ@Base 12
+ _D11TypeInfo_xf6__initZ@Base 12
+ _D11TypeInfo_xh6__initZ@Base 12
+ _D11TypeInfo_xi6__initZ@Base 12
+ _D11TypeInfo_xk6__initZ@Base 12
+ _D11TypeInfo_xl6__initZ@Base 12
+ _D11TypeInfo_xm6__initZ@Base 12
+ _D11TypeInfo_xt6__initZ@Base 12
+ _D11TypeInfo_xu6__initZ@Base 12
+ _D11TypeInfo_xv6__initZ@Base 12
+ _D11TypeInfo_xw6__initZ@Base 12
+ _D11TypeInfo_ya6__initZ@Base 12
+ _D11TypeInfo_yb6__initZ@Base 12
+ _D11TypeInfo_yh6__initZ@Base 12
+ _D11TypeInfo_yi6__initZ@Base 12
+ _D11TypeInfo_yk6__initZ@Base 12
+ _D11TypeInfo_yu6__initZ@Base 12
+ _D120TypeInfo_AE3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8Operator6__initZ@Base 12
+ _D120TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc6__initZ@Base 12
+ _D120TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc6__initZ@Base 12
+ _D120TypeInfo_S4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__initZ@Base 12
+ _D120TypeInfo_xE3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8Operator6__initZ@Base 12
+ _D121TypeInfo_AxE3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8Operator6__initZ@Base 12
+ _D121TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk6__initZ@Base 12
+ _D121TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__initZ@Base 12
+ _D121TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__initZ@Base 12
+ _D121TypeInfo_xAE3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8Operator6__initZ@Base 12
+ _D122TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk6__initZ@Base 12
+ _D122TypeInfo_S3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk6__initZ@Base 12
+ _D122TypeInfo_S3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore6__initZ@Base 12
+ _D123TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S6__initZ@Base 12
+ _D125TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S6__initZ@Base 12
+ _D127TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D127TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D127TypeInfo_S3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl6__initZ@Base 12
+ _D128TypeInfo_xS3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D128TypeInfo_xS3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D129TypeInfo_G2S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D129TypeInfo_G2S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D129TypeInfo_S3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb6__initZ@Base 12
+ _D12TypeInfo_AAf6__initZ@Base 12
+ _D12TypeInfo_AOa6__initZ@Base 12
+ _D12TypeInfo_AOu6__initZ@Base 12
+ _D12TypeInfo_Axa6__initZ@Base 12
+ _D12TypeInfo_Axa6__vtblZ@Base 12
+ _D12TypeInfo_Axa7__ClassZ@Base 12
+ _D12TypeInfo_Axf6__initZ@Base 12
+ _D12TypeInfo_Axh6__initZ@Base 12
+ _D12TypeInfo_Axi6__initZ@Base 12
+ _D12TypeInfo_Axk6__initZ@Base 12
+ _D12TypeInfo_Axm6__initZ@Base 12
+ _D12TypeInfo_Axu6__initZ@Base 12
+ _D12TypeInfo_Axv6__initZ@Base 12
+ _D12TypeInfo_Axw6__initZ@Base 12
+ _D12TypeInfo_Aya6__initZ@Base 12
+ _D12TypeInfo_Aya6__vtblZ@Base 12
+ _D12TypeInfo_Aya7__ClassZ@Base 12
+ _D12TypeInfo_Ayh6__initZ@Base 12
+ _D12TypeInfo_Ayk6__initZ@Base 12
+ _D12TypeInfo_Ayu6__initZ@Base 12
+ _D12TypeInfo_FZv6__initZ@Base 12
+ _D12TypeInfo_G2m6__initZ@Base 12
+ _D12TypeInfo_G3m6__initZ@Base 12
+ _D12TypeInfo_G4a6__initZ@Base 12
+ _D12TypeInfo_G4m6__initZ@Base 12
+ _D12TypeInfo_G8h6__initZ@Base 12
+ _D12TypeInfo_Hmb6__initZ@Base 12
+ _D12TypeInfo_Hmm6__initZ@Base 12
+ _D12TypeInfo_Oxk6__initZ@Base 12
+ _D12TypeInfo_Pxa6__initZ@Base 12
+ _D12TypeInfo_Pxh6__initZ@Base 12
+ _D12TypeInfo_Pxv6__initZ@Base 12
+ _D12TypeInfo_xAa6__initZ@Base 12
+ _D12TypeInfo_xAf6__initZ@Base 12
+ _D12TypeInfo_xAh6__initZ@Base 12
+ _D12TypeInfo_xAi6__initZ@Base 12
+ _D12TypeInfo_xAk6__initZ@Base 12
+ _D12TypeInfo_xAm6__initZ@Base 12
+ _D12TypeInfo_xAu6__initZ@Base 12
+ _D12TypeInfo_xAv6__initZ@Base 12
+ _D12TypeInfo_xAw6__initZ@Base 12
+ _D12TypeInfo_xPh6__initZ@Base 12
+ _D12TypeInfo_xPv6__initZ@Base 12
+ _D12TypeInfo_yAa6__initZ@Base 12
+ _D130TypeInfo_xG2S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D130TypeInfo_xG2S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D130TypeInfo_xS3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb6__initZ@Base 12
+ _D131TypeInfo_S3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd6__initZ@Base 12
+ _D131TypeInfo_S3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd6__initZ@Base 12
+ _D131TypeInfo_S3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd6__initZ@Base 12
+ _D133TypeInfo_S3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__initZ@Base 12
+ _D133TypeInfo_S3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__initZ@Base 12
+ _D134TypeInfo_E3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi13parseCharTermMFZ5State6__initZ@Base 12
+ _D134TypeInfo_xS3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__initZ@Base 12
+ _D135TypeInfo_S3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5State6__initZ@Base 12
+ _D136TypeInfo_S3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5Value6__initZ@Base 12
+ _D136TypeInfo_S3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D136TypeInfo_S3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D137TypeInfo_xS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5Value6__initZ@Base 12
+ _D138TypeInfo_S3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk6__initZ@Base 12
+ _D138TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D138TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__initZ@Base 12
+ _D139TypeInfo_xS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D139TypeInfo_xS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D13TypeInfo_AAya6__initZ@Base 12
+ _D13TypeInfo_AHmb6__initZ@Base 12
+ _D13TypeInfo_APxa6__initZ@Base 12
+ _D13TypeInfo_AxPv6__initZ@Base 12
+ _D13TypeInfo_AyAa6__initZ@Base 12
+ _D13TypeInfo_DFZv6__initZ@Base 12
+ _D13TypeInfo_Enum6__initZ@Base 12
+ _D13TypeInfo_Enum6__vtblZ@Base 12
+ _D13TypeInfo_Enum7__ClassZ@Base 12
+ _D13TypeInfo_G12a6__initZ@Base 12
+ _D13TypeInfo_G48a6__initZ@Base 12
+ _D13TypeInfo_xAPv6__initZ@Base 12
+ _D13TypeInfo_xAya6__initZ@Base 12
+ _D13TypeInfo_xAyu6__initZ@Base 12
+ _D13TypeInfo_xG2m6__initZ@Base 12
+ _D13TypeInfo_xG3m6__initZ@Base 12
+ _D13TypeInfo_xG4a6__initZ@Base 12
+ _D13TypeInfo_xG4m6__initZ@Base 12
+ _D13TypeInfo_xG8h6__initZ@Base 12
+ _D140TypeInfo_S3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw6__initZ@Base 12
+ _D140TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl6__initZ@Base 12
+ _D142TypeInfo_S3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy6__initZ@Base 12
+ _D144TypeInfo_S3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo6__initZ@Base 12
+ _D144TypeInfo_S3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5State6__initZ@Base 12
+ _D144TypeInfo_S3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5Trace6__initZ@Base 12
+ _D145TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm6__initZ@Base 12
+ _D14TypeInfo_Array6__initZ@Base 12
+ _D14TypeInfo_Array6__vtblZ@Base 12
+ _D14TypeInfo_Array7__ClassZ@Base 12
+ _D14TypeInfo_AxAya6__initZ@Base 12
+ _D14TypeInfo_Class6__initZ@Base 12
+ _D14TypeInfo_Class6__vtblZ@Base 12
+ _D14TypeInfo_Class7__ClassZ@Base 12
+ _D14TypeInfo_Const6__initZ@Base 12
+ _D14TypeInfo_Const6__vtblZ@Base 12
+ _D14TypeInfo_Const7__ClassZ@Base 12
+ _D14TypeInfo_FPvZv6__initZ@Base 12
+ _D14TypeInfo_HAxam6__initZ@Base 12
+ _D14TypeInfo_Inout6__initZ@Base 12
+ _D14TypeInfo_Inout6__vtblZ@Base 12
+ _D14TypeInfo_Inout7__ClassZ@Base 12
+ _D14TypeInfo_Tuple6__initZ@Base 12
+ _D14TypeInfo_Tuple6__vtblZ@Base 12
+ _D14TypeInfo_Tuple7__ClassZ@Base 12
+ _D14TypeInfo_xAAya6__initZ@Base 12
+ _D14TypeInfo_xDFZv6__initZ@Base 12
+ _D14TypeInfo_xG12a6__initZ@Base 12
+ _D14TypeInfo_xG48a6__initZ@Base 12
+ _D150TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg6__initZ@Base 12
+ _D151TypeInfo_S3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh6__initZ@Base 12
+ _D151TypeInfo_xS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D155TypeInfo_S3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result6__initZ@Base 12
+ _D156TypeInfo_S3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D156TypeInfo_xS3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_G3S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D157TypeInfo_S3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd6__initZ@Base 12
+ _D157TypeInfo_S3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd6__initZ@Base 12
+ _D157TypeInfo_S3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd6__initZ@Base 12
+ _D157TypeInfo_S3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6__initZ@Base 12
+ _D157TypeInfo_S3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D158TypeInfo_xG3S3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D159TypeInfo_S3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc6__initZ@Base 12
+ _D159TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp6__initZ@Base 12
+ _D15TypeInfo_HAxaxm6__initZ@Base 12
+ _D15TypeInfo_HAyaQd6__initZ@Base 12
+ _D15TypeInfo_PFPvZv6__initZ@Base 12
+ _D15TypeInfo_Shared6__initZ@Base 12
+ _D15TypeInfo_Shared6__vtblZ@Base 12
+ _D15TypeInfo_Shared7__ClassZ@Base 12
+ _D15TypeInfo_Struct6__initZ@Base 12
+ _D15TypeInfo_Struct6__vtblZ@Base 12
+ _D15TypeInfo_Struct7__ClassZ@Base 12
+ _D15TypeInfo_Vector6__initZ@Base 12
+ _D15TypeInfo_Vector6__vtblZ@Base 12
+ _D15TypeInfo_Vector7__ClassZ@Base 12
+ _D15TypeInfo_xHAxam6__initZ@Base 12
+ _D160TypeInfo_xS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp6__initZ@Base 12
+ _D161TypeInfo_S3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6__initZ@Base 12
+ _D163TypeInfo_S3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh6__initZ@Base 12
+ _D164TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__initZ@Base 12
+ _D164TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__initZ@Base 12
+ _D165TypeInfo_S3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc5State6__initZ@Base 12
+ _D167TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn6__initZ@Base 12
+ _D167TypeInfo_S3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl6__initZ@Base 12
+ _D167TypeInfo_S3std9algorithm7sorting__T11TimSortImplSQBn3uni__T13InversionListTSQCoQBb8GcPolicyZQBh8sanitizeMFNfZ9__lambda2TSQEhQCu__TQCtTQChZQDb__T9IntervalsTAkZQoZ5Slice6__initZ@Base 12
+ _D168TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj6__initZ@Base 12
+ _D16TypeInfo_HAyaAQe6__initZ@Base 12
+ _D16TypeInfo_HPxvAya6__initZ@Base 12
+ _D16TypeInfo_Pointer6__initZ@Base 12
+ _D16TypeInfo_Pointer6__vtblZ@Base 12
+ _D16TypeInfo_Pointer7__ClassZ@Base 12
+ _D16TypeInfo_xPFPvZv6__initZ@Base 12
+ _D175TypeInfo_S3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf6__initZ@Base 12
+ _D176TypeInfo_S3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result6__initZ@Base 12
+ _D177TypeInfo_S3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D177TypeInfo_S3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D177TypeInfo_S3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D178TypeInfo_S3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__initZ@Base 12
+ _D179TypeInfo_S3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt6__initZ@Base 12
+ _D179TypeInfo_xS3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__initZ@Base 12
+ _D17TypeInfo_Delegate6__initZ@Base 12
+ _D17TypeInfo_Delegate6__vtblZ@Base 12
+ _D17TypeInfo_Delegate7__ClassZ@Base 12
+ _D17TypeInfo_Function6__initZ@Base 12
+ _D17TypeInfo_Function6__vtblZ@Base 12
+ _D17TypeInfo_Function7__ClassZ@Base 12
+ _D17TypeInfo_HAyaxAya6__initZ@Base 12
+ _D17TypeInfo_xHAyaAya6__initZ@Base 12
+ _D182TypeInfo_S3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa6__initZ@Base 12
+ _D183TypeInfo_S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn14ConstructState6__initZ@Base 12
+ _D184TypeInfo_S3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter6__initZ@Base 12
+ _D184TypeInfo_xS3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn14ConstructState6__initZ@Base 12
+ _D185TypeInfo_G4S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn14ConstructState6__initZ@Base 12
+ _D185TypeInfo_S3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6__initZ@Base 12
+ _D185TypeInfo_S3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6__initZ@Base 12
+ _D185TypeInfo_S3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf6__initZ@Base 12
+ _D185TypeInfo_xS3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter6__initZ@Base 12
+ _D186TypeInfo_xG4S3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn14ConstructState6__initZ@Base 12
+ _D18TypeInfo_HAyaxAAya6__initZ@Base 12
+ _D18TypeInfo_Interface6__initZ@Base 12
+ _D18TypeInfo_Interface6__vtblZ@Base 12
+ _D18TypeInfo_Interface7__ClassZ@Base 12
+ _D18TypeInfo_Invariant6__initZ@Base 12
+ _D18TypeInfo_Invariant6__vtblZ@Base 12
+ _D18TypeInfo_Invariant7__ClassZ@Base 12
+ _D18TypeInfo_xC6Object6__initZ@Base 12
+ _D18TypeInfo_xHAyaAAya6__initZ@Base 12
+ _D190TypeInfo_S3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D191TypeInfo_xS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D192TypeInfo_AxS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D192TypeInfo_PxS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D192TypeInfo_S3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh6__initZ@Base 12
+ _D192TypeInfo_xAS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D192TypeInfo_xPS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D194TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1S6__initZ@Base 12
+ _D199TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd6__initZ@Base 12
+ _D201TypeInfo_S3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt6__initZ@Base 12
+ _D202TypeInfo_S3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt6__initZ@Base 12
+ _D205TypeInfo_S3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj6__initZ@Base 12
+ _D208TypeInfo_S3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt5State6__initZ@Base 12
+ _D20TypeInfo_S2rt3aaA2AA6__initZ@Base 12
+ _D20TypeInfo_S6object2AA6__initZ@Base 12
+ _D20TypeInfo_StaticArray6__initZ@Base 12
+ _D20TypeInfo_StaticArray6__vtblZ@Base 12
+ _D20TypeInfo_StaticArray7__ClassZ@Base 12
+ _D211TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1S6__initZ@Base 12
+ _D223TypeInfo_S3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs6__initZ@Base 12
+ _D224TypeInfo_xS3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs6__initZ@Base 12
+ _D228TypeInfo_S3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu6__initZ@Base 12
+ _D22TypeInfo_FNbC6ObjectZv6__initZ@Base 12
+ _D22TypeInfo_S2rt3aaA4Impl6__initZ@Base 12
+ _D230TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt6__initZ@Base 12
+ _D231TypeInfo_S3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy6__initZ@Base 12
+ _D23TypeInfo_DFNbC6ObjectZv6__initZ@Base 12
+ _D23TypeInfo_E3std3uni4Mode6__initZ@Base 12
+ _D23TypeInfo_S2rt3aaA5Range6__initZ@Base 12
+ _D249TypeInfo_S3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result6__initZ@Base 12
+ _D24TypeInfo_AC3std3xml4Item6__initZ@Base 12
+ _D24TypeInfo_AC3std3xml4Text6__initZ@Base 12
+ _D24TypeInfo_E3std6system2OS6__initZ@Base 12
+ _D24TypeInfo_S2rt3aaA6Bucket6__initZ@Base 12
+ _D24TypeInfo_S2rt5tlsgc4Data6__initZ@Base 12
+ _D24TypeInfo_S3std4uuid4UUID6__initZ@Base 12
+ _D24TypeInfo_xDFNbC6ObjectZv6__initZ@Base 12
+ _D250TypeInfo_S3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq6__initZ@Base 12
+ _D257TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu6__initZ@Base 12
+ _D25TypeInfo_AC3std3xml5CData6__initZ@Base 12
+ _D25TypeInfo_AssociativeArray6__initZ@Base 12
+ _D25TypeInfo_AssociativeArray6__vtblZ@Base 12
+ _D25TypeInfo_AssociativeArray7__ClassZ@Base 12
+ _D25TypeInfo_AxDFNbC6ObjectZv6__initZ@Base 12
+ _D25TypeInfo_S3etc1c4curl3_N26__initZ@Base 12
+ _D25TypeInfo_S3std5stdio4File6__initZ@Base 12
+ _D25TypeInfo_S4core6memory2GC6__initZ@Base 12
+ _D25TypeInfo_S6object7AARange6__initZ@Base 12
+ _D25TypeInfo_xADFNbC6ObjectZv6__initZ@Base 12
+ _D25TypeInfo_xS2rt3aaA6Bucket6__initZ@Base 12
+ _D261TypeInfo_S4core8internal5array7casting__T11__ArrayCastTvTS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4NodeZQHqFNaNiNeNkMAvZ5Array6__initZ@Base 12
+ _D265TypeInfo_S3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf6__initZ@Base 12
+ _D26TypeInfo_AxS2rt3aaA6Bucket6__initZ@Base 12
+ _D26TypeInfo_E3std3xml7TagType6__initZ@Base 12
+ _D26TypeInfo_HAyaC3std3xml3Tag6__initZ@Base 12
+ _D26TypeInfo_S2rt6dmain25CArgs6__initZ@Base 12
+ _D26TypeInfo_S3etc1c4curl4_N286__initZ@Base 12
+ _D26TypeInfo_S3etc1c4curl4_N316__initZ@Base 12
+ _D26TypeInfo_S3std3uni7unicode6__initZ@Base 12
+ _D26TypeInfo_S3std5stdio5lines6__initZ@Base 12
+ _D26TypeInfo_S3std8typecons2No6__initZ@Base 12
+ _D26TypeInfo_xAS2rt3aaA6Bucket6__initZ@Base 12
+ _D26TypeInfo_xS3std5stdio4File6__initZ@Base 12
+ _D27TypeInfo_AC3std3xml7Comment6__initZ@Base 12
+ _D27TypeInfo_AC3std3xml7Element6__initZ@Base 12
+ _D27TypeInfo_E3etc1c4curl5CurlM6__initZ@Base 12
+ _D27TypeInfo_E3std6digest5Order6__initZ@Base 12
+ _D27TypeInfo_E3std8encoding3BOM6__initZ@Base 12
+ _D27TypeInfo_S3std3net4curl3FTP6__initZ@Base 12
+ _D27TypeInfo_S3std3uni8GcPolicy6__initZ@Base 12
+ _D27TypeInfo_S3std3uni8Grapheme6__initZ@Base 12
+ _D27TypeInfo_S3std7process4Pipe6__initZ@Base 12
+ _D27TypeInfo_S3std7sumtype4This6__initZ@Base 12
+ _D27TypeInfo_S3std8typecons3Yes6__initZ@Base 12
+ _D27TypeInfo_S4core6int1284Cent6__initZ@Base 12
+ _D27TypeInfo_S6object9Interface6__initZ@Base 12
+ _D28TypeInfo_C3std6digest6Digest6__initZ@Base 12
+ _D28TypeInfo_E2rt3aaA4Impl5Flags6__initZ@Base 12
+ _D28TypeInfo_E3std3csv9Malformed6__initZ@Base 12
+ _D28TypeInfo_E3std4file8SpanMode6__initZ@Base 12
+ _D28TypeInfo_E3std4json8JSONType6__initZ@Base 12
+ _D28TypeInfo_E3std6getopt6config6__initZ@Base 12
+ _D28TypeInfo_E3std6system6Endian6__initZ@Base 12
+ _D28TypeInfo_S2rt8lifetime5Array6__initZ@Base 12
+ _D28TypeInfo_S3std3net4curl4Curl6__initZ@Base 12
+ _D28TypeInfo_S3std3net4curl4HTTP6__initZ@Base 12
+ _D28TypeInfo_S3std3net4curl4SMTP6__initZ@Base 12
+ _D28TypeInfo_S3std4file8DirEntry6__initZ@Base 12
+ _D28TypeInfo_S3std6bigint6BigInt6__initZ@Base 12
+ _D28TypeInfo_S3std6digest2md3MD56__initZ@Base 12
+ _D28TypeInfo_S3std6getopt6Option6__initZ@Base 12
+ _D28TypeInfo_S3std6int1286Int1286__initZ@Base 12
+ _D28TypeInfo_S3std6socket6Linger6__initZ@Base 12
+ _D29TypeInfo_AS3std4file8DirEntry6__initZ@Base 12
+ _D29TypeInfo_E3etc1c4curl7CurlFtp6__initZ@Base 12
+ _D29TypeInfo_E3etc1c4curl7CurlMsg6__initZ@Base 12
+ _D29TypeInfo_E3etc1c4curl7CurlVer6__initZ@Base 12
+ _D29TypeInfo_E3std5stdio8LockType6__initZ@Base 12
+ _D29TypeInfo_S2rt9profilegc5Entry6__initZ@Base 12
+ _D29TypeInfo_S3etc1c4curl7CURLMsg6__initZ@Base 12
+ _D29TypeInfo_S3std4json9JSONValue6__initZ@Base 12
+ _D29TypeInfo_S3std5range8NullSink6__initZ@Base 12
+ _D29TypeInfo_S3std6socket7TimeVal6__initZ@Base 12
+ _D29TypeInfo_S3std7process6Config6__initZ@Base 12
+ _D29TypeInfo_S4core4time8Duration6__initZ@Base 12
+ _D29TypeInfo_S4core5bitop7Split646__initZ@Base 12
+ _D29TypeInfo_S4core7runtime5CArgs6__initZ@Base 12
+ _D29TypeInfo_S6object10ModuleInfo6__initZ@Base 12
+ _D29TypeInfo_xE2rt3aaA4Impl5Flags6__initZ@Base 12
+ _D29TypeInfo_xE3std4file8SpanMode6__initZ@Base 12
+ _D29TypeInfo_xS3std3net4curl4Curl6__initZ@Base 12
+ _D29TypeInfo_xS3std4file8DirEntry6__initZ@Base 12
+ _D29TypeInfo_xS3std6getopt6Option6__initZ@Base 12
+ _D2rt11arrayassign11__moduleRefZ@Base 12
+ _D2rt11arrayassign12__ModuleInfoZ@Base 12
+ _D2rt3aaA10allocEntryFMxPSQyQx4ImplMxPvZPv@Base 12
+ _D2rt3aaA11__moduleRefZ@Base 12
+ _D2rt3aaA11fakeEntryTIFKSQxQw4ImplxC8TypeInfoxQlZ13tiMangledNameyAa@Base 12
+ _D2rt3aaA11fakeEntryTIFKSQxQw4ImplxC8TypeInfoxQlZC15TypeInfo_Struct@Base 12
+ _D2rt3aaA11rtinfoEntryFKSQxQw4ImplPymQdPmmZPyv@Base 12
+ _D2rt3aaA12__ModuleInfoZ@Base 12
+ _D2rt3aaA12allocBucketsFNaNbNemZASQBgQBg6Bucket@Base 12
+ _D2rt3aaA2AA5emptyMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA2AA6__initZ@Base 12
+ _D2rt3aaA3mixFNaNbNiNfmZm@Base 12
+ _D2rt3aaA4Impl11__xopEqualsMxFKxSQBfQBfQBeZb@Base 12
+ _D2rt3aaA4Impl14findSlotInsertMNgFNaNbNimZPNgSQBsQBs6Bucket@Base 12
+ _D2rt3aaA4Impl14findSlotLookupMNgFmMxPvMxC8TypeInfoZPNgSQCcQCc6Bucket@Base 12
+ _D2rt3aaA4Impl3dimMxFNaNbNdNiNfZm@Base 12
+ _D2rt3aaA4Impl4growMFMxC8TypeInfoZv@Base 12
+ _D2rt3aaA4Impl4maskMxFNaNbNdNiZm@Base 12
+ _D2rt3aaA4Impl5clearMFNaNbZv@Base 12
+ _D2rt3aaA4Impl6__ctorMFNcMxC25TypeInfo_AssociativeArraymZSQCeQCeQCd@Base 12
+ _D2rt3aaA4Impl6__initZ@Base 12
+ _D2rt3aaA4Impl6lengthMxFNaNbNdNiZm@Base 12
+ _D2rt3aaA4Impl6resizeMFNaNbmZv@Base 12
+ _D2rt3aaA4Impl6shrinkMFMxC8TypeInfoZv@Base 12
+ _D2rt3aaA4Impl9__xtoHashFNbNeKxSQBeQBeQBdZm@Base 12
+ _D2rt3aaA5Range6__initZ@Base 12
+ _D2rt3aaA6Bucket5emptyMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA6Bucket6__initZ@Base 12
+ _D2rt3aaA6Bucket6filledMxFNaNbNdNiNfZb@Base 12
+ _D2rt3aaA6Bucket7deletedMxFNaNbNdNiZb@Base 12
+ _D2rt3aaA6talignFNaNbNiNfmmZm@Base 12
+ _D2rt3aaA7hasDtorFxC8TypeInfoZb@Base 12
+ _D2rt3aaA8calcHashFMxPvMxC8TypeInfoZm@Base 12
+ _D2rt3aaA8nextpow2FNaNbNixmZm@Base 12
+ _D2rt3aaA9entryDtorFPvxC15TypeInfo_StructZv@Base 12
+ _D2rt3aaA9getRTInfoFxC8TypeInfoZPyv@Base 12
+ _D2rt3adi11__moduleRefZ@Base 12
+ _D2rt3adi12__ModuleInfoZ@Base 12
+ _D2rt3deh11__moduleRefZ@Base 12
+ _D2rt3deh12__ModuleInfoZ@Base 12
+ _D2rt4util7utility10safeAssertFNbNiNfbMAyaMQemZv@Base 12
+ _D2rt4util7utility11__moduleRefZ@Base 12
+ _D2rt4util7utility12__ModuleInfoZ@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTdZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTdZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTdZQByZm@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTeZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTeZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTeZQByZm@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm11__xopEqualsMxFKxSQCaQCaQBy__TQBtTfZQBzZb@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D2rt4util7utility__T8_ComplexTfZQm9__xtoHashFNbNeKxSQBzQBzQBx__TQBsTfZQByZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_c8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_j8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n4swapMxFNaNbNiNfPvQcZv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n6equalsMxFNaNbNiNfIPvIQdZb@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n7compareMxFNaNbNiNfIPvIQdZi@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n7getHashMxFNaNbNiNfMxPvZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_n8toStringMxFNaNbNiNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_o8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_p8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_q8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_r8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo10TypeInfo_v8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ac8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Aj8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ao8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ap8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Aq8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Ar8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Av4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo11TypeInfo_Av8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo11__moduleRefZ@Base 12
+ _D2rt4util8typeinfo12TypeInfo_Aya8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo12__ModuleInfoZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe11initializerMxFNaNbNeZ1cyG1EQEeQEeQCrQCm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility16__c_complex_realTQBiZQCe8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf11initializerMxFNaNbNeZ1cyG1EQEfQEfQCsQCn@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility17__c_complex_floatTQBjZQCf8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg11initializerMxFNaNbNeZ1cyG1EQEgQEgQCtQCo@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTEQBnQBn7utility18__c_complex_doubleTQBkZQCg8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw11initializerMxFNaNbNeZ1cyG1a@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTaThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTbThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw11initializerMxFNaNbNeZ1cyG1d@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTdTdZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw11initializerMxFNaNbNeZ1cyG1e@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTeTeZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw11initializerMxFNaNbNeZ1cyG1f@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw5flagsMxFNaNbNdNiNeZk@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTfTfZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTgThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericThThZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTiTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTkTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTlTmZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTmTmZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTsTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw4swapMxFNaNbNePvQcZv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw5tsizeMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6rtInfoMxFNaNbNdNiNeZPyv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw6talignMxFNaNbNdNiNeZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw7getHashMxFNaNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTtTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw11initializerMxFNaNbNeZ1cyG1u@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTuTtZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw11initializerMxFNaNbNeZ1cyG1w@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw11initializerMxFNaNbNiNeZAxv@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw6__initZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T15TypeInfoGenericTwTkZQw8toStringMxFNaNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility16__c_complex_realTQBiZQCj8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility17__c_complex_floatTQBjZQCk8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTEQBsQBs7utility18__c_complex_doubleTQBkZQCl8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTaThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTbThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTdTdZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTeTeZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTfTfZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTgThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericThThZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTiTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTkTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTlTmZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTmTmZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTsTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTtTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTuTtZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTwTkZQBb8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTxaTxaZQBd8toStringMxFNbNfZAya@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6__initZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6__vtblZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd6equalsMxFIPvIQdZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7__ClassZ@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7compareMxFIPvIQdZi@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd7getHashMxFNbNeMxPvZm@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd8opEqualsMxFNbNfxC6ObjectZb@Base 12
+ _D2rt4util8typeinfo__T20TypeInfoArrayGenericTyaTyaZQBd8toStringMxFNbNfZAya@Base 12
+ _D2rt5cast_11__moduleRefZ@Base 12
+ _D2rt5cast_12__ModuleInfoZ@Base 12
+ _D2rt5cast_18areClassInfosEqualFNaNbNiNfMxC14TypeInfo_ClassMxQtZb@Base 12
+ _D2rt5minfo11ModuleGroup11__xopEqualsMxFKxSQBpQBpQBmZb@Base 12
+ _D2rt5minfo11ModuleGroup11runTlsCtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup11runTlsDtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup12genCyclePathMFmmAAiZAm@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec11__xopEqualsMxFKxSQCsQCsQCpQCfMFQBuZQBuZb@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec3modMFNdZi@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec9__xtoHashFNbNeKxSQCrQCrQCoQCeMFQBtZQBtZm@Base 12
+ _D2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZb@Base 12
+ _D2rt5minfo11ModuleGroup4freeMFZv@Base 12
+ _D2rt5minfo11ModuleGroup6__ctorMFNbNcNiAyPS6object10ModuleInfoZSQCkQCkQCh@Base 12
+ _D2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup7modulesMxFNbNdNiZAyPS6object10ModuleInfo@Base 12
+ _D2rt5minfo11ModuleGroup8runCtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup8runDtorsMFZv@Base 12
+ _D2rt5minfo11ModuleGroup9__xtoHashFNbNeKxSQBoQBoQBlZm@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFAyaZ8findDepsMFmPmZ10stackFrame6__initZ@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFAyaZv@Base 12
+ _D2rt5minfo11ModuleGroup9sortCtorsMFZv@Base 12
+ _D2rt5minfo11__moduleRefZ@Base 12
+ _D2rt5minfo12__ModuleInfoZ@Base 12
+ _D2rt5minfo17moduleinfos_applyFMDFyPS6object10ModuleInfoZiZi@Base 12
+ _D2rt5tlsgc11__moduleRefZ@Base 12
+ _D2rt5tlsgc12__ModuleInfoZ@Base 12
+ _D2rt5tlsgc14processGCMarksFNbPvMDFNbQhZiZv@Base 12
+ _D2rt5tlsgc4Data6__initZ@Base 12
+ _D2rt5tlsgc4initFNbNiZPv@Base 12
+ _D2rt5tlsgc4scanFNbPvMDFNbQhQjZvZv@Base 12
+ _D2rt5tlsgc7destroyFNbNiPvZv@Base 12
+ _D2rt6aApply11__moduleRefZ@Base 12
+ _D2rt6aApply12__ModuleInfoZ@Base 12
+ _D2rt6config11__moduleRefZ@Base 12
+ _D2rt6config12__ModuleInfoZ@Base 12
+ _D2rt6config13rt_linkOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6config15rt_configOptionFNbNiAyaMDFNbNiQkZQnbZQr@Base 12
+ _D2rt6config16rt_cmdlineOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6config16rt_envvarsOptionFNbNiAyaMDFNbNiQkZQnZQq@Base 12
+ _D2rt6dmain210_initCountOm@Base 12
+ _D2rt6dmain211__moduleRefZ@Base 12
+ _D2rt6dmain212__ModuleInfoZ@Base 12
+ _D2rt6dmain212traceHandlerPFPvZC6object9Throwable9TraceInfo@Base 12
+ _D2rt6dmain214UnitTestResult6__initZ@Base 12
+ _D2rt6dmain215formatThrowableFC6object9ThrowableMDFNbIAaZvZv@Base 12
+ _D2rt6dmain221parseExceptionOptionsFNbNiZb@Base 12
+ _D2rt6dmain25CArgs6__initZ@Base 12
+ _D2rt6dmain26_cArgsSQsQr5CArgs@Base 12
+ _D2rt6dmain27_d_argsAAya@Base 12
+ _D2rt6memory11__moduleRefZ@Base 12
+ _D2rt6memory12__ModuleInfoZ@Base 12
+ _D2rt6memory16initStaticDataGCFZv@Base 12
+ _D2rt7aApplyR11__moduleRefZ@Base 12
+ _D2rt7aApplyR12__ModuleInfoZ@Base 12
+ _D2rt7ehalloc11__moduleRefZ@Base 12
+ _D2rt7ehalloc12__ModuleInfoZ@Base 12
+ _D2rt8arraycat11__moduleRefZ@Base 12
+ _D2rt8arraycat12__ModuleInfoZ@Base 12
+ _D2rt8lifetime10__arrayPadFNaNbNemxC8TypeInfoZm@Base 12
+ _D2rt8lifetime10__blkcacheFNbNdZPS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime11__moduleRefZ@Base 12
+ _D2rt8lifetime11hasPostblitFIC8TypeInfoZb@Base 12
+ _D2rt8lifetime11newCapacityFmmZm@Base 12
+ _D2rt8lifetime12__ModuleInfoZ@Base 12
+ _D2rt8lifetime12__arrayAllocFNaNbmMxC8TypeInfoxQlZS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime12__arrayAllocFmKS4core6memory8BlkInfo_MxC8TypeInfoxQlZQBm@Base 12
+ _D2rt8lifetime12__arrayStartFNaNbNkMS4core6memory8BlkInfo_ZPv@Base 12
+ _D2rt8lifetime12__doPostblitFPvmxC8TypeInfoZv@Base 12
+ _D2rt8lifetime12__getBlkInfoFNbPvZPS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime12__nextBlkIdxi@Base 12
+ _D2rt8lifetime14collectHandlerPFC6ObjectZb@Base 12
+ _D2rt8lifetime14finalize_arrayFPvmxC15TypeInfo_StructZv@Base 12
+ _D2rt8lifetime14processGCMarksFNbPS4core6memory8BlkInfo_MDFNbPvZiZv@Base 12
+ _D2rt8lifetime15__arrayClearPadFNaNbKS4core6memory8BlkInfo_mmZv@Base 12
+ _D2rt8lifetime15finalize_array2FNbPvmZv@Base 12
+ _D2rt8lifetime15finalize_structFNbPvmZv@Base 12
+ _D2rt8lifetime18__arrayAllocLengthFNaNbKS4core6memory8BlkInfo_xC8TypeInfoZm@Base 12
+ _D2rt8lifetime18__blkcache_storagePS4core6memory8BlkInfo_@Base 12
+ _D2rt8lifetime18structTypeInfoSizeFNaNbNixC8TypeInfoZm@Base 12
+ _D2rt8lifetime19_d_arraysetlengthiTUxC8TypeInfomPAvZ12doInitializeFNaNbNiPvQcxAvZv@Base 12
+ _D2rt8lifetime19_staticDtor_L503_C1FZv@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock6__initZ@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock6__vtblZ@Base 12
+ _D2rt8lifetime20ArrayAllocLengthLock7__ClassZ@Base 12
+ _D2rt8lifetime20__insertBlkInfoCacheFNbS4core6memory8BlkInfo_PQxZv@Base 12
+ _D2rt8lifetime21__setArrayAllocLengthFNaNbKS4core6memory8BlkInfo_mbxC8TypeInfomZb@Base 12
+ _D2rt8lifetime26hasArrayFinalizerInSegmentFNbPvmIAvZi@Base 12
+ _D2rt8lifetime27hasStructFinalizerInSegmentFNbPvmIAvZi@Base 12
+ _D2rt8lifetime5Array6__initZ@Base 12
+ _D2rt8lifetime9unqualifyFNaNbNiNkMNgC8TypeInfoZNgQn@Base 12
+ _D2rt8lifetime__T14_d_newarrayOpTX12_d_newarrayTZQBgFNaNbxC8TypeInfoAmZAv@Base 12
+ _D2rt8lifetime__T14_d_newarrayOpTX13_d_newarrayiTZQBhFNaNbxC8TypeInfoAmZAv@Base 12
+ _D2rt8monitor_10getMonitorFNaNbNiC6ObjectZPOSQBrQBr7Monitor@Base 12
+ _D2rt8monitor_10setMonitorFNaNbNiC6ObjectPOSQBqQBq7MonitorZv@Base 12
+ _D2rt8monitor_11__moduleRefZ@Base 12
+ _D2rt8monitor_11unlockMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_12__ModuleInfoZ@Base 12
+ _D2rt8monitor_12destroyMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_12disposeEventFNbPSQBfQBf7MonitorC6ObjectZv@Base 12
+ _D2rt8monitor_13deleteMonitorFNbNiPSQBiQBi7MonitorZv@Base 12
+ _D2rt8monitor_13ensureMonitorFNbC6ObjectZPOSQBqQBq7Monitor@Base 12
+ _D2rt8monitor_4gmtxS4core3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D2rt8monitor_5gattrS4core3sys5posixQk5types19pthread_mutexattr_t@Base 12
+ _D2rt8monitor_7Monitor11__xopEqualsMxFKxSQBnQBnQBhZb@Base 12
+ _D2rt8monitor_7Monitor6__initZ@Base 12
+ _D2rt8monitor_7Monitor9__xtoHashFNbNeKxSQBmQBmQBgZm@Base 12
+ _D2rt8monitor_7monitorFNaNbNcNdNiNkMC6ObjectZOPSQBuQBu7Monitor@Base 12
+ _D2rt8monitor_9initMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8monitor_9lockMutexFNbNiPS4core3sys5posixQk5types15pthread_mutex_tZv@Base 12
+ _D2rt8sections11__moduleRefZ@Base 12
+ _D2rt8sections12__ModuleInfoZ@Base 12
+ _D2rt8sections20scanDataSegPreciselyFNbNiZ3errC6object5Error@Base 12
+ _D2rt8sections20scanDataSegPreciselyFNbNiZb@Base 12
+ _D2rt9critical_11__moduleRefZ@Base 12
+ _D2rt9critical_11ensureMutexFNbPOSQBgQBg18D_CRITICAL_SECTIONZv@Base 12
+ _D2rt9critical_12__ModuleInfoZ@Base 12
+ _D2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D2rt9critical_3gcsOSQtQs18D_CRITICAL_SECTION@Base 12
+ _D2rt9critical_4headOPSQvQu18D_CRITICAL_SECTION@Base 12
+ _D2rt9profilegc10accumulateFNbNiAyakQeQgmZv@Base 12
+ _D2rt9profilegc11__moduleRefZ@Base 12
+ _D2rt9profilegc11logfilenameAya@Base 12
+ _D2rt9profilegc12__ModuleInfoZ@Base 12
+ _D2rt9profilegc15globalNewCountsS4core8internal9container7hashtab__T7HashTabTAxaTSQDcQDc5EntryZQBb@Base 12
+ _D2rt9profilegc18_staticDtor_L93_C1FZ11__critsec19OPv@Base 12
+ _D2rt9profilegc18_staticDtor_L93_C1FZv@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result11__xopEqualsMxFKxSQCqQCqQCjFZQBlZb@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result6__initZ@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result9__xtoHashFNbNeKxSQCpQCpQCiFZQBkZm@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result9qsort_cmpUNbNiMxPvMxQeZi@Base 12
+ _D2rt9profilegc25_sharedStaticDtor_L115_C1FZv@Base 12
+ _D2rt9profilegc5Entry6__initZ@Base 12
+ _D2rt9profilegc6bufferAa@Base 12
+ _D2rt9profilegc9newCountsS4core8internal9container7hashtab__T7HashTabTAxaTSQCvQCv5EntryZQBb@Base 12
+ _D303TypeInfo_S3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr6__initZ@Base 12
+ _D305TypeInfo_S3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt6__initZ@Base 12
+ _D308TypeInfo_S3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw6__initZ@Base 12
+ _D30TypeInfo_AC3std6socket7Address6__initZ@Base 12
+ _D30TypeInfo_AxS3std4file8DirEntry6__initZ@Base 12
+ _D30TypeInfo_AxS3std6getopt6Option6__initZ@Base 12
+ _D30TypeInfo_E3etc1c4curl8CurlAuth6__initZ@Base 12
+ _D30TypeInfo_E3etc1c4curl8CurlForm6__initZ@Base 12
+ _D30TypeInfo_E3etc1c4curl8CurlInfo6__initZ@Base 12
+ _D30TypeInfo_E3etc1c4curl8CurlPoll6__initZ@Base 12
+ _D30TypeInfo_E3etc1c4curl8CurlSeek6__initZ@Base 12
+ _D30TypeInfo_E3std3xml10DecodeMode6__initZ@Base 12
+ _D30TypeInfo_E3std6socket8socket_t6__initZ@Base 12
+ _D30TypeInfo_E3std6traits8Variadic6__initZ@Base 12
+ _D30TypeInfo_E3std8compiler6Vendor6__initZ@Base 12
+ _D30TypeInfo_E4core4time9ClockType6__initZ@Base 12
+ _D30TypeInfo_S2rt8monitor_7Monitor6__initZ@Base 12
+ _D30TypeInfo_S3etc1c4zlib8z_stream6__initZ@Base 12
+ _D30TypeInfo_S3std5stdio4File4Impl6__initZ@Base 12
+ _D30TypeInfo_S3std6format8NoOpSink6__initZ@Base 12
+ _D30TypeInfo_S4core5bitop8BitRange6__initZ@Base 12
+ _D30TypeInfo_xAS3std4file8DirEntry6__initZ@Base 12
+ _D30TypeInfo_xAS3std6getopt6Option6__initZ@Base 12
+ _D30TypeInfo_xS2rt9profilegc5Entry6__initZ@Base 12
+ _D30TypeInfo_xS3std4json9JSONValue6__initZ@Base 12
+ _D30TypeInfo_yS6object10ModuleInfo6__initZ@Base 12
+ _D31TypeInfo_C3gcc3deh11CxxTypeInfo6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlError6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlIoCmd6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlPause6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlProto6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlProxy6__initZ@Base 12
+ _D31TypeInfo_E3etc1c4curl9CurlRedir6__initZ@Base 12
+ _D31TypeInfo_E3std4math10RealFormat6__initZ@Base 12
+ _D31TypeInfo_E3std7process8Redirect6__initZ@Base 12
+ _D31TypeInfo_PyS6object10ModuleInfo6__initZ@Base 12
+ _D31TypeInfo_S3etc1c4zlib9gz_header6__initZ@Base 12
+ _D31TypeInfo_S3gcc8sections3elf3DSO6__initZ@Base 12
+ _D31TypeInfo_S3std10checkedint4Warn6__initZ@Base 12
+ _D31TypeInfo_S3std11concurrency3Tid6__initZ@Base 12
+ _D31TypeInfo_S3std3net4curl7CurlAPI6__initZ@Base 12
+ _D31TypeInfo_S3std8typecons7Ternary6__initZ@Base 12
+ _D31TypeInfo_S4core5cpuid9CacheInfo6__initZ@Base 12
+ _D31TypeInfo_S4core6memory2GC5Stats6__initZ@Base 12
+ _D31TypeInfo_S4core6memory8BlkInfo_6__initZ@Base 12
+ _D31TypeInfo_S4core7runtime7Runtime6__initZ@Base 12
+ _D31TypeInfo_xS3std5stdio4File4Impl6__initZ@Base 12
+ _D31TypeInfo_yPS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_AS3std11concurrency3Tid6__initZ@Base 12
+ _D32TypeInfo_AyPS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_C6object6Object7Monitor6__initZ@Base 12
+ _D32TypeInfo_E3std4json11JSONOptions6__initZ@Base 12
+ _D32TypeInfo_E3std4uuid4UUID7Variant6__initZ@Base 12
+ _D32TypeInfo_E3std4uuid4UUID7Version6__initZ@Base 12
+ _D32TypeInfo_E3std5ascii10LetterCase6__initZ@Base 12
+ _D32TypeInfo_PxS3std5stdio4File4Impl6__initZ@Base 12
+ _D32TypeInfo_S2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D32TypeInfo_S3std10checkedint5Abort6__initZ@Base 12
+ _D32TypeInfo_S3std10checkedint5Throw6__initZ@Base 12
+ _D32TypeInfo_S3std3net4curl3FTP4Impl6__initZ@Base 12
+ _D32TypeInfo_S3std3uni7unicode5block6__initZ@Base 12
+ _D32TypeInfo_S3std3uni__T5StackTkZQj6__initZ@Base 12
+ _D32TypeInfo_S3std4file11DirIterator6__initZ@Base 12
+ _D32TypeInfo_S3std5stdio10ChunksImpl6__initZ@Base 12
+ _D32TypeInfo_S3std8bitmanip8BitArray6__initZ@Base 12
+ _D32TypeInfo_S4core2gc6config6Config6__initZ@Base 12
+ _D32TypeInfo_S4core4stdc4fenv6fenv_t6__initZ@Base 12
+ _D32TypeInfo_S4core4sync5event5Event6__initZ@Base 12
+ _D32TypeInfo_S4core8demangle7NoHooks6__initZ@Base 12
+ _D32TypeInfo_S6object13__va_list_tag6__initZ@Base 12
+ _D32TypeInfo_xE3std7process8Redirect6__initZ@Base 12
+ _D32TypeInfo_xPS3std5stdio4File4Impl6__initZ@Base 12
+ _D32TypeInfo_xPyS6object10ModuleInfo6__initZ@Base 12
+ _D32TypeInfo_xS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D32TypeInfo_xS3std11concurrency3Tid6__initZ@Base 12
+ _D32TypeInfo_yS4core5cpuid9CacheInfo6__initZ@Base 12
+ _D33TypeInfo_AxPyS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlFtpSSL6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlGlobal6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlKHStat6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlKHType6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlOption6__initZ@Base 12
+ _D33TypeInfo_E3etc1c4curl10CurlUseSSL6__initZ@Base 12
+ _D33TypeInfo_E3std4zlib12HeaderFormat6__initZ@Base 12
+ _D33TypeInfo_E3std5ascii11ControlChar6__initZ@Base 12
+ _D33TypeInfo_E3std6mmfile6MmFile4Mode6__initZ@Base 12
+ _D33TypeInfo_E3std6socket10SocketType6__initZ@Base 12
+ _D33TypeInfo_E3std8encoding9AsciiChar6__initZ@Base 12
+ _D33TypeInfo_E4core6memory2GC7BlkAttr6__initZ@Base 12
+ _D33TypeInfo_E4core9attribute7mustuse6__initZ@Base 12
+ _D33TypeInfo_PxS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D33TypeInfo_S3etc1c4curl10curl_forms6__initZ@Base 12
+ _D33TypeInfo_S3etc1c4curl10curl_khkey6__initZ@Base 12
+ _D33TypeInfo_S3etc1c4curl10curl_slist6__initZ@Base 12
+ _D33TypeInfo_S3std3net4curl4HTTP4Impl6__initZ@Base 12
+ _D33TypeInfo_S3std3net4curl4SMTP4Impl6__initZ@Base 12
+ _D33TypeInfo_S3std3uni13ReallocPolicy6__initZ@Base 12
+ _D33TypeInfo_S3std3uni7unicode6script6__initZ@Base 12
+ _D33TypeInfo_S3std8datetime4date4Date6__initZ@Base 12
+ _D33TypeInfo_S4core2gc8registry5Entry6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc5stdio6fpos_t6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc6locale5lconv6__initZ@Base 12
+ _D33TypeInfo_S4core4stdc6stdlib5div_t6__initZ@Base 12
+ _D33TypeInfo_S6object14OffsetTypeInfo6__initZ@Base 12
+ _D33TypeInfo_xAPyS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_xAyPS6object10ModuleInfo6__initZ@Base 12
+ _D33TypeInfo_xC6object6Object7Monitor6__initZ@Base 12
+ _D33TypeInfo_xPS3gcc8sections3elf3DSO6__initZ@Base 12
+ _D33TypeInfo_xS2rt5minfo11ModuleGroup6__initZ@Base 12
+ _D33TypeInfo_xS3std3net4curl3FTP4Impl6__initZ@Base 12
+ _D33TypeInfo_xS3std3uni__T5StackTkZQj6__initZ@Base 12
+ _D33TypeInfo_xS4core4sync5event5Event6__initZ@Base 12
+ _D33TypeInfo_xS4core8demangle7NoHooks6__initZ@Base 12
+ _D340TypeInfo_S3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz6__initZ@Base 12
+ _D345TypeInfo_S3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr6__initZ@Base 12
+ _D34TypeInfo_AC3std3zip13ArchiveMember6__initZ@Base 12
+ _D34TypeInfo_AE3std8encoding9AsciiChar6__initZ@Base 12
+ _D34TypeInfo_C4core2gc11gcinterface2GC6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlCSelect6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlFormAdd6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlFtpAuth6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlIoError6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlKHMatch6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlMOption6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlRtspReq6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlSeekPos6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlShError6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlSshAuth6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlTlsAuth6__initZ@Base 12
+ _D34TypeInfo_E3etc1c4curl11CurlVersion6__initZ@Base 12
+ _D34TypeInfo_E3gcc6config11ThreadModel6__initZ@Base 12
+ _D34TypeInfo_E3std4path13CaseSensitive6__initZ@Base 12
+ _D34TypeInfo_E3std5range12SearchPolicy6__initZ@Base 12
+ _D34TypeInfo_E3std6socket11SocketFlags6__initZ@Base 12
+ _D34TypeInfo_E3std8datetime4date5Month6__initZ@Base 12
+ _D34TypeInfo_HAyaxS3std4json9JSONValue6__initZ@Base 12
+ _D34TypeInfo_HS3std11concurrency3Tidxb6__initZ@Base 12
+ _D34TypeInfo_S3std10checkedint7WithNaN6__initZ@Base 12
+ _D34TypeInfo_S3std3uni14MatcherConcept6__initZ@Base 12
+ _D34TypeInfo_S3std6socket11AddressInfo6__initZ@Base 12
+ _D34TypeInfo_S4core3sys5posix6direntQh6__initZ@Base 12
+ _D34TypeInfo_S4core4stdc6stdlib6ldiv_t6__initZ@Base 12
+ _D34TypeInfo_S4core4time12TickDuration6__initZ@Base 12
+ _D34TypeInfo_S4core5cpuid11CpuFeatures6__initZ@Base 12
+ _D34TypeInfo_xC3std3zip13ArchiveMember6__initZ@Base 12
+ _D34TypeInfo_xE3std6socket10SocketType6__initZ@Base 12
+ _D34TypeInfo_xHAyaS3std4json9JSONValue6__initZ@Base 12
+ _D34TypeInfo_xHS3std11concurrency3Tidb6__initZ@Base 12
+ _D34TypeInfo_xS3etc1c4curl10curl_slist6__initZ@Base 12
+ _D34TypeInfo_xS3std3net4curl4HTTP4Impl6__initZ@Base 12
+ _D35TypeInfo_AC4core6thread5fiber5Fiber6__initZ@Base 12
+ _D35TypeInfo_AS3std6socket11AddressInfo6__initZ@Base 12
+ _D35TypeInfo_AxC3std3zip13ArchiveMember6__initZ@Base 12
+ _D35TypeInfo_C3std8typecons10Structural6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlFileType6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlLockData6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlReadFunc6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlShOption6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlSockType6__initZ@Base 12
+ _D35TypeInfo_E3etc1c4curl12CurlTimeCond6__initZ@Base 12
+ _D35TypeInfo_E3std11concurrency7MsgType6__initZ@Base 12
+ _D35TypeInfo_E3std3net4curl4HTTP6Method6__initZ@Base 12
+ _D35TypeInfo_E3std5regex8internal2ir2IR6__initZ@Base 12
+ _D35TypeInfo_E3std5stdio13StdFileHandle6__initZ@Base 12
+ _D35TypeInfo_E3std6socket12ProtocolType6__initZ@Base 12
+ _D35TypeInfo_E3std6socket12SocketOption6__initZ@Base 12
+ _D35TypeInfo_E3std7process6Config5Flags6__initZ@Base 12
+ _D35TypeInfo_E3std8encoding10Latin1Char6__initZ@Base 12
+ _D35TypeInfo_E3std8encoding10Latin2Char6__initZ@Base 12
+ _D35TypeInfo_E4core6atomic11MemoryOrder6__initZ@Base 12
+ _D35TypeInfo_HAyaS3std11concurrency3Tid6__initZ@Base 12
+ _D35TypeInfo_PxS3etc1c4curl10curl_slist6__initZ@Base 12
+ _D35TypeInfo_S3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D35TypeInfo_S3std10checkedint8Saturate6__initZ@Base 12
+ _D35TypeInfo_S3std11concurrency7Message6__initZ@Base 12
+ _D35TypeInfo_S3std3net4curl7CurlAPI3API6__initZ@Base 12
+ _D35TypeInfo_S3std4json9JSONValue5Store6__initZ@Base 12
+ _D35TypeInfo_S3std5range__T6RepeatTaZQk6__initZ@Base 12
+ _D35TypeInfo_S3std6getopt12GetoptResult6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5linux7ifaddrsQi6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix3aio5aiocb6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix3grp5group6__initZ@Base 12
+ _D35TypeInfo_S4core3sys5posix7termiosQi6__initZ@Base 12
+ _D35TypeInfo_S4core4stdc5stdio8_IO_FILE6__initZ@Base 12
+ _D35TypeInfo_S4core4stdc6stdlib7lldiv_t6__initZ@Base 12
+ _D35TypeInfo_S4core9attribute9gnuAbiTag6__initZ@Base 12
+ _D35TypeInfo_xAC3std3zip13ArchiveMember6__initZ@Base 12
+ _D35TypeInfo_xPS3etc1c4curl10curl_slist6__initZ@Base 12
+ _D35TypeInfo_xS3std6socket11AddressInfo6__initZ@Base 12
+ _D36TypeInfo_AE3std8encoding10Latin1Char6__initZ@Base 12
+ _D36TypeInfo_AE3std8encoding10Latin2Char6__initZ@Base 12
+ _D36TypeInfo_AxS3std6socket11AddressInfo6__initZ@Base 12
+ _D36TypeInfo_E3etc1c4curl13CurlFtpMethod6__initZ@Base 12
+ _D36TypeInfo_E3etc1c4curl13CurlIpResolve6__initZ@Base 12
+ _D36TypeInfo_E3std3net7isemail9EmailPart6__initZ@Base 12
+ _D36TypeInfo_E3std5range14StoppingPolicy6__initZ@Base 12
+ _D36TypeInfo_E3std6socket13AddressFamily6__initZ@Base 12
+ _D36TypeInfo_E4core4stdc6config8__c_long6__initZ@Base 12
+ _D36TypeInfo_FC3std3xml13ElementParserZv6__initZ@Base 12
+ _D36TypeInfo_FZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D36TypeInfo_HS3std11concurrency3TidAAya6__initZ@Base 12
+ _D36TypeInfo_S2rt6dmain214UnitTestResult6__initZ@Base 12
+ _D36TypeInfo_S3etc1c4curl13curl_certinfo6__initZ@Base 12
+ _D36TypeInfo_S3etc1c4curl13curl_fileinfo6__initZ@Base 12
+ _D36TypeInfo_S3etc1c4curl13curl_httppost6__initZ@Base 12
+ _D36TypeInfo_S3etc1c4curl13curl_sockaddr6__initZ@Base 12
+ _D36TypeInfo_S3gcc9backtrace10SymbolInfo6__initZ@Base 12
+ _D36TypeInfo_S3std4file15DirIteratorImpl6__initZ@Base 12
+ _D36TypeInfo_S3std5range__T6ChunksTAhZQl6__initZ@Base 12
+ _D36TypeInfo_S3std5stdio14ReadlnAppender6__initZ@Base 12
+ _D36TypeInfo_S3std6getopt13configuration6__initZ@Base 12
+ _D36TypeInfo_S3std7process12ProcessPipes6__initZ@Base 12
+ _D36TypeInfo_S4core2gc11gcinterface4Root6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5linux2fs7fsxattr6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5posix3pwd6passwd6__initZ@Base 12
+ _D36TypeInfo_S4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D36TypeInfo_xAS3std6socket11AddressInfo6__initZ@Base 12
+ _D36TypeInfo_xE3std11concurrency7MsgType6__initZ@Base 12
+ _D36TypeInfo_xE3std3net4curl4HTTP6Method6__initZ@Base 12
+ _D36TypeInfo_xE3std6socket12ProtocolType6__initZ@Base 12
+ _D36TypeInfo_xS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D36TypeInfo_xS3std11concurrency7Message6__initZ@Base 12
+ _D37TypeInfo_C3std11concurrency9Scheduler6__initZ@Base 12
+ _D37TypeInfo_C6object9Throwable9TraceInfo6__initZ@Base 12
+ _D37TypeInfo_DFC3std3xml13ElementParserZv6__initZ@Base 12
+ _D37TypeInfo_E3etc1c4curl14CurlLockAccess6__initZ@Base 12
+ _D37TypeInfo_E3etc1c4curl14CurlSslVersion6__initZ@Base 12
+ _D37TypeInfo_E3std3uni17NormalizationForm6__initZ@Base 12
+ _D37TypeInfo_E3std3zip17CompressionMethod6__initZ@Base 12
+ _D37TypeInfo_E3std4json16JSONFloatLiteral6__initZ@Base 12
+ _D37TypeInfo_E3std4math9algebraic7PowType6__initZ@Base 12
+ _D37TypeInfo_E3std6socket14SocketShutdown6__initZ@Base 12
+ _D37TypeInfo_E3std7process13InternalError6__initZ@Base 12
+ _D37TypeInfo_E3std8internal4test3uda4Attr6__initZ@Base 12
+ _D37TypeInfo_E3std8typecons12TypeModifier6__initZ@Base 12
+ _D37TypeInfo_E4core4stdc6config9__c_ulong6__initZ@Base 12
+ _D37TypeInfo_HAyaC3std3zip13ArchiveMember6__initZ@Base 12
+ _D37TypeInfo_PFZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D37TypeInfo_PxS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D37TypeInfo_S3gcc8sections3elf9ThreadDSO6__initZ@Base 12
+ _D37TypeInfo_S3gcc8sections3elf9tls_index6__initZ@Base 12
+ _D37TypeInfo_S3std3net4curl12AutoProtocol6__initZ@Base 12
+ _D37TypeInfo_S3std3uni17CodepointInterval6__initZ@Base 12
+ _D37TypeInfo_S3std8datetime4date8DateTime6__initZ@Base 12
+ _D37TypeInfo_S3std9container5dlist6DRange6__initZ@Base 12
+ _D37TypeInfo_S4core2gc11gcinterface5Range6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix3aio7aiocb646__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix4poll6pollfd6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posix5fcntl5flock6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posixQk3uio5iovec6__initZ@Base 12
+ _D37TypeInfo_S4core3sys5posixQk7utsnameQi6__initZ@Base 12
+ _D37TypeInfo_S4core4stdc6wchar_9mbstate_t6__initZ@Base 12
+ _D37TypeInfo_S4core6stdcpp4new_9nothrow_t6__initZ@Base 12
+ _D37TypeInfo_xE3std6socket13AddressFamily6__initZ@Base 12
+ _D37TypeInfo_xPS3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D37TypeInfo_xS3std4file15DirIteratorImpl6__initZ@Base 12
+ _D37TypeInfo_xS4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D38TypeInfo_AS3std3uni17CodepointInterval6__initZ@Base 12
+ _D38TypeInfo_E3etc1c4curl15CurlClosePolicy6__initZ@Base 12
+ _D38TypeInfo_E3etc1c4curl15CurlFnMAtchFunc6__initZ@Base 12
+ _D38TypeInfo_E3etc1c4curl15CurlHttpVersion6__initZ@Base 12
+ _D38TypeInfo_E3etc1c4curl15CurlNetRcOption6__initZ@Base 12
+ _D38TypeInfo_E3std3net7isemail10AsciiToken6__initZ@Base 12
+ _D38TypeInfo_E3std5stdio4File11Orientation6__initZ@Base 12
+ _D38TypeInfo_E3std8datetime4date9DayOfWeek6__initZ@Base 12
+ _D38TypeInfo_PxS4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D38TypeInfo_S3gcc3deh18CxaExceptionHeader6__initZ@Base 12
+ _D38TypeInfo_S3std3zip10ZipArchive7Segment6__initZ@Base 12
+ _D38TypeInfo_S3std4math8hardware9IeeeFlags6__initZ@Base 12
+ _D38TypeInfo_S3std5array__T8AppenderTAaZQn6__initZ@Base 12
+ _D38TypeInfo_S3std5regex__T8CapturesTAaZQn6__initZ@Base 12
+ _D38TypeInfo_S3std5stdio4File11ByChunkImpl6__initZ@Base 12
+ _D38TypeInfo_S3std6digest6ripemd9RIPEMD1606__initZ@Base 12
+ _D38TypeInfo_S3std7complex__T7ComplexTeZQl6__initZ@Base 12
+ _D38TypeInfo_S3std7numeric__T6StrideTAfZQl6__initZ@Base 12
+ _D38TypeInfo_S3std8datetime4date9TimeOfDay6__initZ@Base 12
+ _D38TypeInfo_S4core2gc6config11PrettyBytes6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5linux4link7r_debug6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix4stdc4time2tm6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix5netdb6netent6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posix6locale5lconv6__initZ@Base 12
+ _D38TypeInfo_S4core3sys5posixQk3msg6msgbuf6__initZ@Base 12
+ _D38TypeInfo_S4core8internal7convert5Float6__initZ@Base 12
+ _D38TypeInfo_xPFZC4core2gc11gcinterface2GC6__initZ@Base 12
+ _D38TypeInfo_xPS4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D38TypeInfo_xS3std3uni17CodepointInterval6__initZ@Base 12
+ _D38TypeInfo_xS4core2gc11gcinterface5Range6__initZ@Base 12
+ _D39TypeInfo_AC4core6thread8osthread6Thread6__initZ@Base 12
+ _D39TypeInfo_AS3std3zip10ZipArchive7Segment6__initZ@Base 12
+ _D39TypeInfo_AxS3std3uni17CodepointInterval6__initZ@Base 12
+ _D39TypeInfo_E3etc1c4curl16CurlCallbackInfo6__initZ@Base 12
+ _D39TypeInfo_E3etc1c4curl16CurlChunkBgnFunc6__initZ@Base 12
+ _D39TypeInfo_E3etc1c4curl16CurlChunkEndFunc6__initZ@Base 12
+ _D39TypeInfo_E3std11concurrency10OnCrowding6__initZ@Base 12
+ _D39TypeInfo_E3std11parallelism10TaskStatus6__initZ@Base 12
+ _D39TypeInfo_E3std5range17TransverseOptions6__initZ@Base 12
+ _D39TypeInfo_E3std6socket16AddressInfoFlags6__initZ@Base 12
+ _D39TypeInfo_S3gcc9backtrace13SymbolOrError6__initZ@Base 12
+ _D39TypeInfo_S3std11concurrency10ThreadInfo6__initZ@Base 12
+ _D39TypeInfo_S3std3net7isemail11EmailStatus6__initZ@Base 12
+ _D39TypeInfo_S3std5array__T8AppenderTAxaZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5array__T8AppenderTAyaZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5array__T8AppenderTAyuZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5array__T8AppenderTAywZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5array__T8AppenderTyAaZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5regex__T8CapturesTAxaZQo6__initZ@Base 12
+ _D39TypeInfo_S3std5stdio17LockingTextReader6__initZ@Base 12
+ _D39TypeInfo_S3std7variant15FakeComplexReal6__initZ@Base 12
+ _D39TypeInfo_S3std8bitmanip__T7BitsSetTmZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8datetime7systime7SysTime6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTaTaZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTbTiZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTbTkZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTkTmZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTmTmZQl6__initZ@Base 12
+ _D39TypeInfo_S3std8typecons__T5TupleTuTaZQl6__initZ@Base 12
+ _D39TypeInfo_S3std9container5dlist8BaseNode6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Dyn6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Lib6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Rel6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf32_Sym6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Dyn6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Lib6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Rel6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux3elf9Elf64_Sym6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5linux4link8link_map6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5dlfcn7Dl_info6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5netdb7hostent6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5netdb7servent6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix5utime7utimbuf6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posix6signal6sigval6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posixQk3msg7msginfo6__initZ@Base 12
+ _D39TypeInfo_S4core3sys5posixQk4stat6stat_t6__initZ@Base 12
+ _D39TypeInfo_S4core4stdc8inttypes9imaxdiv_t6__initZ@Base 12
+ _D39TypeInfo_S4core6memory2GC12ProfileStats6__initZ@Base 12
+ _D39TypeInfo_S4core6thread7context8Callable6__initZ@Base 12
+ _D39TypeInfo_S4core7runtime14UnitTestResult6__initZ@Base 12
+ _D39TypeInfo_S4core8internal2gc4bits6GCBits6__initZ@Base 12
+ _D39TypeInfo_xAS3std3uni17CodepointInterval6__initZ@Base 12
+ _D39TypeInfo_xS3std5regex__T8CapturesTAaZQn6__initZ@Base 12
+ _D3etc1c4curl10CurlGlobal6__initZ@Base 12
+ _D3etc1c4curl10CurlOption6__initZ@Base 12
+ _D3etc1c4curl10curl_forms6__initZ@Base 12
+ _D3etc1c4curl10curl_khkey6__initZ@Base 12
+ _D3etc1c4curl10curl_slist6__initZ@Base 12
+ _D3etc1c4curl11CurlCSelect6__initZ@Base 12
+ _D3etc1c4curl11CurlMOption6__initZ@Base 12
+ _D3etc1c4curl11CurlSshAuth6__initZ@Base 12
+ _D3etc1c4curl11CurlVersion6__initZ@Base 12
+ _D3etc1c4curl11__moduleRefZ@Base 12
+ _D3etc1c4curl12CurlReadFunc6__initZ@Base 12
+ _D3etc1c4curl12__ModuleInfoZ@Base 12
+ _D3etc1c4curl13curl_certinfo6__initZ@Base 12
+ _D3etc1c4curl13curl_fileinfo6__initZ@Base 12
+ _D3etc1c4curl13curl_httppost6__initZ@Base 12
+ _D3etc1c4curl13curl_sockaddr6__initZ@Base 12
+ _D3etc1c4curl18CurlFInfoFlagKnown6__initZ@Base 12
+ _D3etc1c4curl3_N26__initZ@Base 12
+ _D3etc1c4curl4_N286__initZ@Base 12
+ _D3etc1c4curl4_N316__initZ@Base 12
+ _D3etc1c4curl5CurlM6__initZ@Base 12
+ _D3etc1c4curl7CURLMsg6__initZ@Base 12
+ _D3etc1c4curl9CurlPause6__initZ@Base 12
+ _D3etc1c4curl9CurlProto6__initZ@Base 12
+ _D3etc1c4zlib11ZLIB_VERNUMyi@Base 12
+ _D3etc1c4zlib11__moduleRefZ@Base 12
+ _D3etc1c4zlib12ZLIB_VERSIONyAa@Base 12
+ _D3etc1c4zlib12__ModuleInfoZ@Base 12
+ _D3etc1c4zlib6Z_NULLyPv@Base 12
+ _D3etc1c4zlib8z_stream6__initZ@Base 12
+ _D3etc1c4zlib9gz_header6__initZ@Base 12
+ _D3gcc10attributes11__moduleRefZ@Base 12
+ _D3gcc10attributes12__ModuleInfoZ@Base 12
+ _D3gcc12libbacktrace11__moduleRefZ@Base 12
+ _D3gcc12libbacktrace12__ModuleInfoZ@Base 12
+ _D3gcc12libbacktrace15backtrace_state6__initZ@Base 12
+ _D3gcc3deh11CxxTypeInfo11__InterfaceZ@Base 12
+ _D3gcc3deh11__moduleRefZ@Base 12
+ _D3gcc3deh12__ModuleInfoZ@Base 12
+ _D3gcc3deh12getClassInfoFNiPSQBb6unwind7generic17_Unwind_ExceptionPxhZC14TypeInfo_Class@Base 12
+ _D3gcc3deh15ExceptionHeader11__xopEqualsMxFKxSQBsQBrQBqZb@Base 12
+ _D3gcc3deh15ExceptionHeader17toExceptionHeaderFNiPSQBx6unwind7generic17_Unwind_ExceptionZPSQDlQDkQDj@Base 12
+ _D3gcc3deh15ExceptionHeader3popFNiZPSQBjQBiQBh@Base 12
+ _D3gcc3deh15ExceptionHeader4freeFNiPSQBjQBiQBhZv@Base 12
+ _D3gcc3deh15ExceptionHeader4pushMFNiZv@Base 12
+ _D3gcc3deh15ExceptionHeader4saveFNiPSQBj6unwind7generic17_Unwind_ExceptionmiPxhmZv@Base 12
+ _D3gcc3deh15ExceptionHeader5stackPSQBhQBgQBf@Base 12
+ _D3gcc3deh15ExceptionHeader6__initZ@Base 12
+ _D3gcc3deh15ExceptionHeader6createFNiC6object9ThrowableZPSQCeQCdQCc@Base 12
+ _D3gcc3deh15ExceptionHeader7restoreFNiPSQBm6unwind7generic17_Unwind_ExceptionJiJPxhJmJmZv@Base 12
+ _D3gcc3deh15ExceptionHeader9__xtoHashFNbNeKxSQBrQBqQBpZm@Base 12
+ _D3gcc3deh15ExceptionHeader9ehstorageSQBkQBjQBi@Base 12
+ _D3gcc3deh17__gdc_personalityFimPSQBg6unwind7generic17_Unwind_ExceptionPSQCtQBnQBj15_Unwind_ContextZk@Base 12
+ _D3gcc3deh17actionTableLookupFiPSQBf6unwind7generic17_Unwind_ExceptionPxhQdmmQhhJbJbZi@Base 12
+ _D3gcc3deh18CONTINUE_UNWINDINGFPSQBf6unwind7generic17_Unwind_ExceptionPSQCsQBnQBj15_Unwind_ContextZk@Base 12
+ _D3gcc3deh18CxaExceptionHeader14getAdjustedPtrFPSQBv6unwind7generic17_Unwind_ExceptionCQDhQDg11CxxTypeInfoZPv@Base 12
+ _D3gcc3deh18CxaExceptionHeader17toExceptionHeaderFNiPSQCa6unwind7generic17_Unwind_ExceptionZPSQDoQDnQDm@Base 12
+ _D3gcc3deh18CxaExceptionHeader4saveFNiPSQBm6unwind7generic17_Unwind_ExceptionPvZv@Base 12
+ _D3gcc3deh18CxaExceptionHeader6__initZ@Base 12
+ _D3gcc3deh19isGdcExceptionClassFNimZb@Base 12
+ _D3gcc3deh19isGxxExceptionClassFNimZb@Base 12
+ _D3gcc3deh20isDependentExceptionFNimZb@Base 12
+ _D3gcc3deh8_d_throwUC6object9ThrowableZ17exception_cleanupUNikPSQCk6unwind7generic17_Unwind_ExceptionZv@Base 12
+ _D3gcc3deh8scanLSDAFPxhmiPSQz6unwind7generic17_Unwind_ExceptionPSQClQBnQBj15_Unwind_ContextmJmJiZk@Base 12
+ _D3gcc3deh9terminateFNiAyakZ11terminatingb@Base 12
+ _D3gcc3deh9terminateFNiAyakZv@Base 12
+ _D3gcc6config11__moduleRefZ@Base 12
+ _D3gcc6config12__ModuleInfoZ@Base 12
+ _D3gcc6emutls11__moduleRefZ@Base 12
+ _D3gcc6emutls12__ModuleInfoZ@Base 12
+ _D3gcc6unwind10arm_common11__moduleRefZ@Base 12
+ _D3gcc6unwind10arm_common12__ModuleInfoZ@Base 12
+ _D3gcc6unwind11__moduleRefZ@Base 12
+ _D3gcc6unwind12__ModuleInfoZ@Base 12
+ _D3gcc6unwind2pe11__moduleRefZ@Base 12
+ _D3gcc6unwind2pe12__ModuleInfoZ@Base 12
+ _D3gcc6unwind2pe12read_sleb128FNiKPxhZl@Base 12
+ _D3gcc6unwind2pe12read_uleb128FNiKPxhZm@Base 12
+ _D3gcc6unwind2pe18read_encoded_valueFNiPSQBnQBm7generic15_Unwind_ContexthKPxhZm@Base 12
+ _D3gcc6unwind2pe21base_of_encoded_valueFNihPSQBrQBq7generic15_Unwind_ContextZm@Base 12
+ _D3gcc6unwind2pe21size_of_encoded_valueFNihZk@Base 12
+ _D3gcc6unwind2pe28read_encoded_value_with_baseFNihmKPxhZm@Base 12
+ _D3gcc6unwind3arm11__moduleRefZ@Base 12
+ _D3gcc6unwind3arm12__ModuleInfoZ@Base 12
+ _D3gcc6unwind3c6x11__moduleRefZ@Base 12
+ _D3gcc6unwind3c6x12__ModuleInfoZ@Base 12
+ _D3gcc6unwind7generic11__moduleRefZ@Base 12
+ _D3gcc6unwind7generic12__ModuleInfoZ@Base 12
+ _D3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D3gcc7gthread11__moduleRefZ@Base 12
+ _D3gcc7gthread12__ModuleInfoZ@Base 12
+ _D3gcc7gthread18__gthread_active_pFNbNiZi@Base 12
+ _D3gcc8builtins11__moduleRefZ@Base 12
+ _D3gcc8builtins12__ModuleInfoZ@Base 12
+ _D3gcc8builtins13__va_list_tag6__initZ@Base 12
+ _D3gcc8sections11__moduleRefZ@Base 12
+ _D3gcc8sections12__ModuleInfoZ@Base 12
+ _D3gcc8sections18pinLoadedLibrariesFNbNiZPv@Base 12
+ _D3gcc8sections20unpinLoadedLibrariesFNbNiPvZv@Base 12
+ _D3gcc8sections22cleanupLoadedLibrariesFNbNiZv@Base 12
+ _D3gcc8sections22inheritLoadedLibrariesFNbNiPvZv@Base 12
+ _D3gcc8sections3elf10_rtLoadingb@Base 12
+ _D3gcc8sections3elf11__moduleRefZ@Base 12
+ _D3gcc8sections3elf11_loadedDSOsFNbNcNdNiZ1xS4core8internal9container5array__T5ArrayTSQDgQDfQCz9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf11_loadedDSOsFNbNcNdNiZS4core8internal9container5array__T5ArrayTSQDeQDdQCx9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf11getTLSRangeFNbNimmZAv@Base 12
+ _D3gcc8sections3elf12__ModuleInfoZ@Base 12
+ _D3gcc8sections3elf12_handleToDSOFNbNcNdNiZ1xS4core8internal9container7hashtab__T7HashTabTPvTPSQDpQDoQDi3DSOZQBc@Base 12
+ _D3gcc8sections3elf12_handleToDSOFNbNcNdNiZS4core8internal9container7hashtab__T7HashTabTPvTPSQDnQDmQDg3DSOZQBc@Base 12
+ _D3gcc8sections3elf12decThreadRefFPSQBiQBhQBb3DSObZv@Base 12
+ _D3gcc8sections3elf12dsoForHandleFNbNiPvZPSQBpQBoQBi3DSO@Base 12
+ _D3gcc8sections3elf12finiSectionsFNbNiZv@Base 12
+ _D3gcc8sections3elf12incThreadRefFPSQBiQBhQBb3DSObZv@Base 12
+ _D3gcc8sections3elf12initSectionsFNbNiZv@Base 12
+ _D3gcc8sections3elf12scanSegmentsFNbNiIKS4core3sys5linux4link12dl_phdr_infoPSQCxQCwQCq3DSOZv@Base 12
+ _D3gcc8sections3elf13findThreadDSOFNbNiPSQBnQBmQBg3DSOZPSQCdQCcQBw9ThreadDSO@Base 12
+ _D3gcc8sections3elf13finiTLSRangesFNbNiPS4core8internal9container5array__T5ArrayTSQDcQDbQCv9ThreadDSOZQBcZv@Base 12
+ _D3gcc8sections3elf13handleForAddrFNbNiPvZQd@Base 12
+ _D3gcc8sections3elf13handleForNameFNbNixPaZPv@Base 12
+ _D3gcc8sections3elf13initTLSRangesFNbNiZPS4core8internal9container5array__T5ArrayTSQDdQDcQCw9ThreadDSOZQBc@Base 12
+ _D3gcc8sections3elf13runFinalizersFPSQBjQBiQBc3DSOZv@Base 12
+ _D3gcc8sections3elf13scanTLSRangesFNbPS4core8internal9container5array__T5ArrayTSQDaQCzQCt9ThreadDSOZQBcMDFNbPvQcZvZv@Base 12
+ _D3gcc8sections3elf15CompilerDSOData6__initZ@Base 12
+ _D3gcc8sections3elf15getDependenciesFNbNiIKS4core3sys5linux4link12dl_phdr_infoKSQBk8internal9container5array__T5ArrayTPSQEoQEnQEh3DSOZQxZv@Base 12
+ _D3gcc8sections3elf15setDSOForHandleFNbNiPSQBpQBoQBi3DSOPvZv@Base 12
+ _D3gcc8sections3elf16linkMapForHandleFNbNiPvZPS4core3sys5linux4link8link_map@Base 12
+ _D3gcc8sections3elf16registerGCRangesFNbNiPSQBqQBpQBj3DSOZv@Base 12
+ _D3gcc8sections3elf17_handleToDSOMutexS4core3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D3gcc8sections3elf17unsetDSOForHandleFNbNiPSQBrQBqQBk3DSOPvZv@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ2DG6__initZ@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ8callbackUNbNiQBzmPvZi@Base 12
+ _D3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZb@Base 12
+ _D3gcc8sections3elf18findSegmentForAddrFNbNiIKS4core3sys5linux4link12dl_phdr_infoIPvPSQBnQBlQBkQDc10Elf64_PhdrZb@Base 12
+ _D3gcc8sections3elf18unregisterGCRangesFNbNiPSQBsQBrQBl3DSOZv@Base 12
+ _D3gcc8sections3elf20runModuleDestructorsFPSQBqQBpQBj3DSObZv@Base 12
+ _D3gcc8sections3elf21_isRuntimeInitializedb@Base 12
+ _D3gcc8sections3elf21runModuleConstructorsFPSQBrQBqQBk3DSObZv@Base 12
+ _D3gcc8sections3elf3DSO11__fieldDtorMFNbNiZv@Base 12
+ _D3gcc8sections3elf3DSO11__invariantMxFZv@Base 12
+ _D3gcc8sections3elf3DSO11__xopEqualsMxFKxSQBoQBnQBhQBgZb@Base 12
+ _D3gcc8sections3elf3DSO11moduleGroupMNgFNbNcNdNiNjZNgS2rt5minfo11ModuleGroup@Base 12
+ _D3gcc8sections3elf3DSO12__invariant0MxFZv@Base 12
+ _D3gcc8sections3elf3DSO14opApplyReverseFMDFKSQBrQBqQBkQBjZiZi@Base 12
+ _D3gcc8sections3elf3DSO6__initZ@Base 12
+ _D3gcc8sections3elf3DSO7modulesMxFNbNdNiZAyPS6object10ModuleInfo@Base 12
+ _D3gcc8sections3elf3DSO7opApplyFMDFKSQBjQBiQBcQBbZiZi@Base 12
+ _D3gcc8sections3elf3DSO8gcRangesMNgFNbNdNiZANgAv@Base 12
+ _D3gcc8sections3elf3DSO8opAssignMFNbNcNiNjSQBpQBoQBiQBhZQo@Base 12
+ _D3gcc8sections3elf3DSO8tlsRangeMxFNbNiZAv@Base 12
+ _D3gcc8sections3elf3DSO9__xtoHashFNbNeKxSQBnQBmQBgQBfZm@Base 12
+ _D3gcc8sections3elf7freeDSOFNbNiPSQBgQBfQz3DSOZv@Base 12
+ _D3gcc8sections3elf9ThreadDSO11__xopEqualsMxFKxSQBuQBtQBnQBmZb@Base 12
+ _D3gcc8sections3elf9ThreadDSO14updateTLSRangeMFNbNiZv@Base 12
+ _D3gcc8sections3elf9ThreadDSO6__initZ@Base 12
+ _D3gcc8sections3elf9ThreadDSO9__xtoHashFNbNeKxSQBtQBsQBmQBlZm@Base 12
+ _D3gcc8sections3elf9finiLocksFNbNiZv@Base 12
+ _D3gcc8sections3elf9initLocksFNbNiZv@Base 12
+ _D3gcc8sections3elf9sizeOfTLSFNbNiZm@Base 12
+ _D3gcc8sections3elf9tls_index6__initZ@Base 12
+ _D3gcc8sections3elf__T7toRangeTyPS6object10ModuleInfoZQBgFNaNbNiPyQBiQfZAyQBq@Base 12
+ _D3gcc8sections5macho11__moduleRefZ@Base 12
+ _D3gcc8sections5macho12__ModuleInfoZ@Base 12
+ _D3gcc8sections6common10safeAssertFNbNiNfbMAyaMQemZv@Base 12
+ _D3gcc8sections6common11__moduleRefZ@Base 12
+ _D3gcc8sections6common12__ModuleInfoZ@Base 12
+ _D3gcc8sections6pecoff11__moduleRefZ@Base 12
+ _D3gcc8sections6pecoff12__ModuleInfoZ@Base 12
+ _D3gcc9attribute11__moduleRefZ@Base 12
+ _D3gcc9attribute12__ModuleInfoZ@Base 12
+ _D3gcc9backtrace10SymbolInfo6__initZ@Base 12
+ _D3gcc9backtrace10formatLineFxSQBdQBc10SymbolInfoNkKG1536aZAa@Base 12
+ _D3gcc9backtrace11__moduleRefZ@Base 12
+ _D3gcc9backtrace12LibBacktrace11initializedb@Base 12
+ _D3gcc9backtrace12LibBacktrace16initLibBacktraceFZv@Base 12
+ _D3gcc9backtrace12LibBacktrace5statePSQBk12libbacktrace15backtrace_state@Base 12
+ _D3gcc9backtrace12LibBacktrace6__ctorMFiZCQBoQBnQBg@Base 12
+ _D3gcc9backtrace12LibBacktrace6__initZ@Base 12
+ _D3gcc9backtrace12LibBacktrace6__vtblZ@Base 12
+ _D3gcc9backtrace12LibBacktrace7__ClassZ@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKSQBuQBt13SymbolOrErrorZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKxAaZiZi@Base 12
+ _D3gcc9backtrace12LibBacktrace8toStringMxFZAya@Base 12
+ _D3gcc9backtrace12__ModuleInfoZ@Base 12
+ _D3gcc9backtrace13SymbolOrError6__initZ@Base 12
+ _D3gcc9backtrace18SymbolCallbackInfo5resetMFZv@Base 12
+ _D3gcc9backtrace18SymbolCallbackInfo6__initZ@Base 12
+ _D3gcc9backtrace19SymbolCallbackInfo26__initZ@Base 12
+ _D3std10checkedint11__moduleRefZ@Base 12
+ _D3std10checkedint12__ModuleInfoZ@Base 12
+ _D3std10checkedint13ProperCompare6__initZ@Base 12
+ _D3std10checkedint13ProperCompare__T9hookOpCmpTmTmZQpFNaNbNiNfmmZi@Base 12
+ _D3std10checkedint4Warn13trustedStderrFNbNcNdNiNeZSQBx5stdio4File@Base 12
+ _D3std10checkedint4Warn6__initZ@Base 12
+ _D3std10checkedint4Warn__T10onOverflowVAyaa1_2bTmTxlZQBbFNfmxlZm@Base 12
+ _D3std10checkedint4Warn__T10onOverflowVAyaa1_2bTmTxmZQBbFNfmxmZm@Base 12
+ _D3std10checkedint4Warn__T10onOverflowVAyaa1_2dTxmTmZQBbFNfxmmZm@Base 12
+ _D3std10checkedint4Warn__T12hookOpEqualsTxmTxmZQvFNfxmxmZb@Base 12
+ _D3std10checkedint4Warn__T9hookOpCmpTmTxmZQqFNfmxmZi@Base 12
+ _D3std10checkedint5Abort6__initZ@Base 12
+ _D3std10checkedint5Abort__T10onOverflowVAyaa1_2bTmTxlZQBbFNfmxlZm@Base 12
+ _D3std10checkedint5Abort__T10onOverflowVAyaa1_2bTmTxmZQBbFNfmxmZm@Base 12
+ _D3std10checkedint5Abort__T10onOverflowVAyaa1_2dTxmTmZQBbFNfxmmZm@Base 12
+ _D3std10checkedint5Abort__T12hookOpEqualsTxmTxmZQvFNfxmxmZb@Base 12
+ _D3std10checkedint5Abort__T9hookOpCmpTmTxmZQqFNfmxmZi@Base 12
+ _D3std10checkedint5Throw12CheckFailure6__initZ@Base 12
+ _D3std10checkedint5Throw12CheckFailure6__vtblZ@Base 12
+ _D3std10checkedint5Throw12CheckFailure7__ClassZ@Base 12
+ _D3std10checkedint5Throw6__initZ@Base 12
+ _D3std10checkedint7WithNaN6__initZ@Base 12
+ _D3std10checkedint8Saturate6__initZ@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz11__xopEqualsMxFKxSQCnQCm__TQCdTmTQByZQCnZb@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz3getMNgFNaNbNiNfZNgm@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz6__initZ@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz6toHashMxFNaNbNiNfZm@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz8__xopCmpMxFKxSQCjQCi__TQBzTmTQBuZQCjZi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T10opOpAssignVAyaa1_2bTlZQyMFNcNjNfxlZSQDjQDi__TQCzTmTQCuZQDj@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T12opBinaryImplVAyaa1_2bTlTSQCyQCx__TQCoTmTQCjZQCyZQByMFNfxlZQBi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T12opBinaryImplVAyaa1_2bTmTSQCyQCx__TQCoTmTQCjZQCyZQByMFNfxmZQBi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T13opBinaryRightVAyaa1_2dTmZQBbMFNfxmZSQDjQDi__TQCzTmTQCuZQDj@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T17opBinaryRightImplVAyaa1_2dTmTSQDdQDc__TQCtTmTQCoZQDdZQCdMFNfxmZQBi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T5opCmpTmTQBcTxSQCmQCl__TQCcTmTQBxZQCmZQBmMxFNaNbNiNfSQDyQDx__TQDoTmTQDjZQDyZi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T5opCmpTmTSQChQCg__TQBxTmTQBsZQChZQBhMFNfxmZi@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T6__ctorTmZQkMFNaNbNcNiNfmZSQCyQCx__TQCoTmTQCjZQCy@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T6__ctorTxmZQlMFNaNbNcNiNfxmZSQDaQCz__TQCqTmTQClZQDa@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T8opBinaryVAyaa1_2bTlZQvMFNfxlZSQDcQDb__TQCsTmTQCnZQDc@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T8opBinaryVAyaa1_2bTmZQvMFNfxmZSQDcQDb__TQCsTmTQCnZQDc@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T8opEqualsTxSQCjQCi__TQBzTmTQBuZQCjTxQzZQBnMxFNfxQBlZb@Base 12
+ _D3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz__T8opEqualsTxmTxSQCmQCl__TQCcTmTQBxZQCmZQBmMxFNfxmZb@Base 12
+ _D3std10checkedint__T7checkedTSQBdQBc5AbortTmZQzFNaNbNiNfxmZSQChQCg__T7CheckedTmTQBzZQp@Base 12
+ _D3std10checkedint__T9opCheckedVAyaa1_2bTmTlZQyFNaNbNiNfxmxlKbZm@Base 12
+ _D3std10checkedint__T9opCheckedVAyaa1_2bTmTmZQyFNaNbNiNfxmxmKbZm@Base 12
+ _D3std10checkedint__T9opCheckedVAyaa1_2dTmTmZQyFNaNbNiNfxmxmKbZm@Base 12
+ _D3std10checkedint__T9opCheckedVAyaa2_3d3dTmTmZQBaFNaNbNiNfxmxmKbZb@Base 12
+ _D3std10checkedint__T9opCheckedVAyaa3_636d70TmTmZQBcFNaNbNiNfxmxmKbZi@Base 12
+ _D3std10functional11__moduleRefZ@Base 12
+ _D3std10functional11_ctfeSkipOpFKAyaZk@Base 12
+ _D3std10functional12__ModuleInfoZ@Base 12
+ _D3std10functional13_ctfeSkipNameFKAyaQdZk@Base 12
+ _D3std10functional15_ctfeMatchUnaryFAyaQdZk@Base 12
+ _D3std10functional16_ctfeMatchBinaryFAyaQdQfZk@Base 12
+ _D3std10functional16_ctfeSkipIntegerFKAyaZk@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__T8unsafeOpTiTmZQoFNaNbNiNfimZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__T8unsafeOpTmTiZQoFNaNbNiNfmiZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__T8unsafeOpTmTlZQoFNaNbNiNfmlZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__T8unsafeOpTmTyiZQpFNaNbNiNfmyiZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__T8unsafeOpTyiTmZQpFNaNbNiNfyimZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTiTmZQBbFNaNbNiNfKiKmZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTmTiZQBbFNaNbNiNfKmKiZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTmTlZQBbFNaNbNiNfKmKlZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTmTyiZQBcFNaNbNiNfKmKyiZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTmTymZQBcFNaNbNiNfKmKymZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTyiTmZQBcFNaNbNiNfKyiKmZb@Base 12
+ _D3std10functional__T6safeOpVAyaa1_3cZ__TQuTymTmZQBcFNaNbNiNfKymKmZb@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ11initializedAm@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ4memoASQExQEw__TQEnS_DQFmQEi__TQEfTQDyZQEnQDzVii8ZQFvFxQEiQEiZ5Value@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5Value11__xopEqualsMxFKxSQFpQFo__TQFfS_DQGeQFa__TQExTQEqZQFfQErVii8ZQGnFxQFaQFaZQDcZb@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5Value6__initZ@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5Value9__xtoHashFNbNeKxSQFoQFn__TQFeS_DQGdQEz__TQEwTQEpZQFeQEqVii8ZQGmFxQEzQEzZQDbZm@Base 12
+ _D3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZQBy@Base 12
+ _D3std10functional__T8unaryFunVAyaa11_6120213d20612e4f70656eVQBea1_61Z__TQCaTEQCy3uni__T16UnicodeSetParserTSQEc5regex8internal6parser__T6ParserTQEjTSQFrQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGkFNaNbNiNfKQEtZb@Base 12
+ _D3std10functional__T8unaryFunVAyaa11_615b305d203e2030783830VQBea1_61Z__TQCaTxSQCz3uni17CodepointIntervalZQDhFNaNbNiNfKxQBqZb@Base 12
+ _D3std10functional__T8unaryFunVAyaa12_61203d3d20612e556e696f6eVQBga1_61Z__TQCcTEQDa3uni__T16UnicodeSetParserTSQEe5regex8internal6parser__T6ParserTQElTSQFtQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGmFNaNbNiNfKQEtZb@Base 12
+ _D3std10functional__T8unaryFunVAyaa4_615b305dVQpa1_61Z__TQBkTSQCi3uni17CodepointIntervalZQCqFNaNbNiNfQBoZk@Base 12
+ _D3std10functional__T8unaryFunVAyaa4_615b315dVQpa1_61Z__TQBkTSQCi3uni17CodepointIntervalZQCqFNaNbNiNfQBoZk@Base 12
+ _D3std10functional__T8unaryFunVAyaa5_612e726873VQra1_61Z__TQBmTySQCl8internal14unicode_tables9CompEntryZQDfFNaNbNiNfKyQCcZyw@Base 12
+ _D3std10functional__T8unaryFunVAyaa6_6120213d2030VQta1_61Z__TQBoTkZQBuFNaNbNiNfKkZb@Base 12
+ _D3std10functional__T8unaryFunVAyaa6_6120213d2030VQta1_61Z__TQBoTxkZQBvFNaNbNiNfKxkZb@Base 12
+ _D3std10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61Z__TQBoTySQCn8internal14unicode_tables15UnicodePropertyZQDoFNaNbNiNfKyQCjZyAa@Base 12
+ _D3std10functional__T9binaryFunVAyaa11_62203c20612e74696d6554VQBea1_61VQBna1_62Z__TQCkTySQDj8datetime8timezone13PosixTimeZone10LeapSecondTylZQEqFNaNbNiNfKyQCpKylZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa11_62203c20612e74696d6554VQBea1_61VQBna1_62Z__TQCkTySQDj8datetime8timezone13PosixTimeZone10TransitionTlZQEpFNaNbNiNfKyQCoKlZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa11_62203c20612e74696d6554VQBea1_61VQBna1_62Z__TQCkTySQDj8datetime8timezone13PosixTimeZone10TransitionTylZQEqFNaNbNiNfKyQCpKylZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa15_612e6e616d65203c20622e6e616d65VQBma1_61VQBva1_62Z__TQCsTSQDq5regex8internal2ir10NamedGroupTQBjZQEjFNaNbNiNfKQCaKQCeZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62Z__TQCwTSQDu8datetime8timezone13PosixTimeZone10LeapSecondTQByZQFcFNaNbNiNfKQCpKQCtZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62Z__TQCwTSQDu8datetime8timezone13PosixTimeZone14TempTransitionTQCcZQFgFNaNbNiNfKQCtKQCxZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa5_61202b2062VQra1_61VQza1_62Z__TQBvTkTkZQCdFNaNbNiNfKkKkZk@Base 12
+ _D3std10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62Z__TQBvTQBoTQBsZQChFNaNbNiNfKQCjKQCnZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62Z__TQBvTkTiZQCdFNaNbNiNfKkKiZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62Z__TQBvTywTwZQCeFNaNbNiNfKywKwZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203c3d2062VQta1_61VQBba1_62Z__TQByTkTkZQCgFNaNbNiNfKkKkZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203c3d2062VQta1_61VQBba1_62Z__TQByTkTyiZQChFNaNbNiNfKkKyiZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203c3d2062VQta1_61VQBba1_62Z__TQByTxkTkZQChFNaNbNiNfKxkKkZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTQBrTQBvZQCkFNaNbNiNfKQCmKQCqZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTSQCw3uni__T13InversionListTSQDxQBb8GcPolicyZQBhTQBwZQEcFNaNbNiNfKQCnKQCrZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTaTaZQCgFNaNbNiNfaaZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByThThZQCgFNaNbNiNfKhKhZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTkTkZQCgFNaNbNiNfKkKkZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTwTaZQCgFNaNbNiNfKwKaZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyAaTQBvZQCkFNaNbNiNfKyQvKQCqZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyaTaZQChFNaNbNiNfKyaKaZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyaTwZQChFNaNbNiNfKyawZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyaTyaZQCiFNaNbNiNfKyaKyaZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyhTwZQChFNaNbNiNfKyhKwZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyhTwZQChFNaNbNiNfKyhwZb@Base 12
+ _D3std10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62Z__TQByTyhTxhZQCiFNaNbNiNfKyhKxhZb@Base 12
+ _D3std11__moduleRefZ@Base 12
+ _D3std11concurrency10MessageBox10setMaxMsgsMFNaNiNfmPFSQCbQCa3TidZbZv@Base 12
+ _D3std11concurrency10MessageBox12isControlMsgMFNaNbNiNfKSQCdQCc7MessageZb@Base 12
+ _D3std11concurrency10MessageBox13isLinkDeadMsgMFNaNbNiNfKSQCeQCd7MessageZb@Base 12
+ _D3std11concurrency10MessageBox13isPriorityMsgMFNaNbNiNfKSQCeQCd7MessageZb@Base 12
+ _D3std11concurrency10MessageBox14updateMsgCountMFNaNbNiNfZv@Base 12
+ _D3std11concurrency10MessageBox3putMFKSQBlQBk7MessageZv@Base 12
+ _D3std11concurrency10MessageBox5closeMFZ13onLinkDeadMsgFKSQCeQCd7MessageZv@Base 12
+ _D3std11concurrency10MessageBox5closeMFZ5sweepFKSQBvQBu__T4ListTSQClQCk7MessageZQwZv@Base 12
+ _D3std11concurrency10MessageBox5closeMFZv@Base 12
+ _D3std11concurrency10MessageBox6__ctorMFNbNeZCQBsQBrQBh@Base 12
+ _D3std11concurrency10MessageBox6__initZ@Base 12
+ _D3std11concurrency10MessageBox6__vtblZ@Base 12
+ _D3std11concurrency10MessageBox7__ClassZ@Base 12
+ _D3std11concurrency10MessageBox8isClosedMFNaNdNiNfZb@Base 12
+ _D3std11concurrency10MessageBox8mboxFullMFNaNbNiNfZb@Base 12
+ _D3std11concurrency10ThreadInfo11__xopEqualsMxFKxSQBwQBvQBlZb@Base 12
+ _D3std11concurrency10ThreadInfo6__initZ@Base 12
+ _D3std11concurrency10ThreadInfo7cleanupMFZv@Base 12
+ _D3std11concurrency10ThreadInfo8thisInfoFNbNcNdNiNfZSQBzQByQBo@Base 12
+ _D3std11concurrency10ThreadInfo8thisInfoFNbNcNdZ3valSQBzQByQBo@Base 12
+ _D3std11concurrency10ThreadInfo9__xtoHashFNbNeKxSQBvQBuQBkZm@Base 12
+ _D3std11concurrency10namesByTidHSQBfQBe3TidAAya@Base 12
+ _D3std11concurrency10unregisterFAyaZb@Base 12
+ _D3std11concurrency11IsGenerator11__InterfaceZ@Base 12
+ _D3std11concurrency11MailboxFull6__ctorMFNaNbNiNfSQBwQBv3TidAyaZCQClQCkQCa@Base 12
+ _D3std11concurrency11MailboxFull6__initZ@Base 12
+ _D3std11concurrency11MailboxFull6__vtblZ@Base 12
+ _D3std11concurrency11MailboxFull7__ClassZ@Base 12
+ _D3std11concurrency11__moduleRefZ@Base 12
+ _D3std11concurrency12__ModuleInfoZ@Base 12
+ _D3std11concurrency12initOnceLockFNdZ4lockOC4core4sync5mutex5Mutex@Base 12
+ _D3std11concurrency12initOnceLockFNdZOC4core4sync5mutex5Mutex@Base 12
+ _D3std11concurrency12registryLockFNdZ4implC4core4sync5mutex5Mutex@Base 12
+ _D3std11concurrency12registryLockFNdZC4core4sync5mutex5Mutex@Base 12
+ _D3std11concurrency12unregisterMeFKSQBiQBh10ThreadInfoZv@Base 12
+ _D3std11concurrency14FiberScheduler12newConditionMFNbC4core4sync5mutex5MutexZCQyQv9condition9Condition@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition13switchContextMFNbZv@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition4waitMFNbS4core4time8DurationZb@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition4waitMFNbZv@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition6__ctorMFNbC4core4sync5mutex5MutexZCQDhQDgQCwQCj@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition6__initZ@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition6__vtblZ@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition6notifyMFNbZv@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition7__ClassZ@Base 12
+ _D3std11concurrency14FiberScheduler14FiberCondition9notifyAllMFNbZv@Base 12
+ _D3std11concurrency14FiberScheduler5spawnMFNbDFZvZv@Base 12
+ _D3std11concurrency14FiberScheduler5startMFDFZvZv@Base 12
+ _D3std11concurrency14FiberScheduler5yieldMFNbZv@Base 12
+ _D3std11concurrency14FiberScheduler6__initZ@Base 12
+ _D3std11concurrency14FiberScheduler6__vtblZ@Base 12
+ _D3std11concurrency14FiberScheduler6createMFNbDFZvZv@Base 12
+ _D3std11concurrency14FiberScheduler7__ClassZ@Base 12
+ _D3std11concurrency14FiberScheduler8dispatchMFZv@Base 12
+ _D3std11concurrency14FiberScheduler8thisInfoMFNbNcNdZSQCaQBz10ThreadInfo@Base 12
+ _D3std11concurrency14FiberScheduler9InfoFiber6__ctorMFNbDFZvZCQCiQChQBxQBk@Base 12
+ _D3std11concurrency14FiberScheduler9InfoFiber6__ctorMFNbDFZvmZCQCjQCiQByQBl@Base 12
+ _D3std11concurrency14FiberScheduler9InfoFiber6__initZ@Base 12
+ _D3std11concurrency14FiberScheduler9InfoFiber6__vtblZ@Base 12
+ _D3std11concurrency14FiberScheduler9InfoFiber7__ClassZ@Base 12
+ _D3std11concurrency14LinkTerminated6__ctorMFNaNbNiNfSQBzQBy3TidAyaZCQCoQCnQCd@Base 12
+ _D3std11concurrency14LinkTerminated6__initZ@Base 12
+ _D3std11concurrency14LinkTerminated6__vtblZ@Base 12
+ _D3std11concurrency14LinkTerminated7__ClassZ@Base 12
+ _D3std11concurrency15MessageMismatch6__ctorMFNaNbNiNfAyaZCQCeQCdQBt@Base 12
+ _D3std11concurrency15MessageMismatch6__initZ@Base 12
+ _D3std11concurrency15MessageMismatch6__vtblZ@Base 12
+ _D3std11concurrency15MessageMismatch7__ClassZ@Base 12
+ _D3std11concurrency15OwnerTerminated6__ctorMFNaNbNiNfSQCaQBz3TidAyaZCQCpQCoQCe@Base 12
+ _D3std11concurrency15OwnerTerminated6__initZ@Base 12
+ _D3std11concurrency15OwnerTerminated6__vtblZ@Base 12
+ _D3std11concurrency15OwnerTerminated7__ClassZ@Base 12
+ _D3std11concurrency15ThreadScheduler12newConditionMFNbC4core4sync5mutex5MutexZCQyQv9condition9Condition@Base 12
+ _D3std11concurrency15ThreadScheduler5spawnMFDFZvZv@Base 12
+ _D3std11concurrency15ThreadScheduler5startMFDFZvZv@Base 12
+ _D3std11concurrency15ThreadScheduler5yieldMFNbZv@Base 12
+ _D3std11concurrency15ThreadScheduler6__initZ@Base 12
+ _D3std11concurrency15ThreadScheduler6__vtblZ@Base 12
+ _D3std11concurrency15ThreadScheduler7__ClassZ@Base 12
+ _D3std11concurrency15ThreadScheduler8thisInfoMFNbNcNdZSQCbQCa10ThreadInfo@Base 12
+ _D3std11concurrency15onCrowdingBlockFNaNbNiNfSQBsQBr3TidZb@Base 12
+ _D3std11concurrency15onCrowdingThrowFNaNfSQBoQBn3TidZb@Base 12
+ _D3std11concurrency16onCrowdingIgnoreFNaNbNiNfSQBtQBs3TidZb@Base 12
+ _D3std11concurrency17setMaxMailboxSizeFNaNfSQBqQBp3TidmEQCcQCb10OnCrowdingZv@Base 12
+ _D3std11concurrency17setMaxMailboxSizeFSQBmQBl3TidmPFQoZbZv@Base 12
+ _D3std11concurrency19TidMissingException6__initZ@Base 12
+ _D3std11concurrency19TidMissingException6__vtblZ@Base 12
+ _D3std11concurrency19TidMissingException7__ClassZ@Base 12
+ _D3std11concurrency19TidMissingException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDmQDlQDb@Base 12
+ _D3std11concurrency19TidMissingException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDmQDlQDb@Base 12
+ _D3std11concurrency19_staticDtor_L266_C1FZv@Base 12
+ _D3std11concurrency24PriorityMessageException11__fieldDtorMFNeZv@Base 12
+ _D3std11concurrency24PriorityMessageException6__ctorMFSQCb7variant__T8VariantNVmi32ZQpZCQDiQDhQCx@Base 12
+ _D3std11concurrency24PriorityMessageException6__initZ@Base 12
+ _D3std11concurrency24PriorityMessageException6__vtblZ@Base 12
+ _D3std11concurrency24PriorityMessageException7__ClassZ@Base 12
+ _D3std11concurrency3Tid11__xopEqualsMxFKxSQBoQBnQBdZb@Base 12
+ _D3std11concurrency3Tid6__ctorMFNaNbNcNiNfCQBpQBo10MessageBoxZSQCjQCiQBy@Base 12
+ _D3std11concurrency3Tid6__initZ@Base 12
+ _D3std11concurrency3Tid9__xtoHashFNbNeKxSQBnQBmQBcZm@Base 12
+ _D3std11concurrency3Tid__T8toStringTSQBj5array__T8AppenderTAyaZQoZQBoMxFNaNfKQBpZv@Base 12
+ _D3std11concurrency5yieldFNbZv@Base 12
+ _D3std11concurrency6locateFAyaZSQBeQBd3Tid@Base 12
+ _D3std11concurrency7Message11__fieldDtorMFNeZv@Base 12
+ _D3std11concurrency7Message11__xopEqualsMxFKxSQBsQBrQBhZb@Base 12
+ _D3std11concurrency7Message15__fieldPostblitMFNlZv@Base 12
+ _D3std11concurrency7Message6__initZ@Base 12
+ _D3std11concurrency7Message8opAssignMFNcNjSQBpQBoQBeZQl@Base 12
+ _D3std11concurrency7Message9__xtoHashFNbNeKxSQBrQBqQBgZm@Base 12
+ _D3std11concurrency7Message__T10convertsToTSQBqQBp3TidZQzMFNdZb@Base 12
+ _D3std11concurrency7Message__T3getTSQBiQBh3TidZQrMFNdZQt@Base 12
+ _D3std11concurrency7Message__T6__ctorTSQBlQBk3TidZQuMFNcEQCdQCc7MsgTypeQBhZSQCwQCvQCl@Base 12
+ _D3std11concurrency7thisTidFNdNfZ4trusFNeZSQBpQBo3Tid@Base 12
+ _D3std11concurrency7thisTidFNdNfZSQBgQBf3Tid@Base 12
+ _D3std11concurrency8ownerTidFNdZSQBfQBe3Tid@Base 12
+ _D3std11concurrency8registerFAyaSQBfQBe3TidZb@Base 12
+ _D3std11concurrency8thisInfoFNbNcNdZSQBjQBi10ThreadInfo@Base 12
+ _D3std11concurrency9Scheduler11__InterfaceZ@Base 12
+ _D3std11concurrency9schedulerCQBcQBb9Scheduler@Base 12
+ _D3std11concurrency9tidByNameHAyaSQBgQBf3Tid@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw3putMFNaNbNiNfKSQCiQCh__TQBxTQBvZQCfZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw3putMFNaNbNiNfPSQCiQCh__TQBxTQBvZQCf4NodeZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw3putMFQyZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node11__fieldDtorMFNeZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node11__xopEqualsMxFKxSQCqQCp__TQCfTQCdZQCnQBsZb@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node15__fieldPostblitMFNlZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__ctorMFNcQBiZSQCnQCm__TQCcTQCaZQCkQBp@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__initZ@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node8opAssignMFNcNjSQCnQCm__TQCcTQCaZQCkQBpZQz@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node9__xtoHashFNbNeKxSQCpQCo__TQCeTQCcZQCmQBrZm@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range5frontMFNaNcNdNfZQBp@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range5frontMFNdQBiZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range6__ctorMFNaNbNcNiNfPSQCtQCs__TQCiTQCgZQCq4NodeZSQDuQDt__TQDjTQDhZQDrQCw@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range6__initZ@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range8popFrontMFNaNfZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5clearMFNaNbNiNfZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw6__initZ@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw7newNodeMFQBcZPSQCiQCh__TQBxTQBvZQCf4Node@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw7opSliceMFNaNbNiZSQCkQCj__TQBzTQBxZQCh5Range@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw7sm_headOPSQCdQCc__TQBsTQBqZQCa4Node@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw7sm_lockOSQCcQCb__TQBrTQBpZQBz8SpinLock@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw8SpinLock4lockMOFNbNiZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw8SpinLock6__initZ@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw8SpinLock6unlockMOFNaNbNiNfZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw8freeNodeMFPSQCfQCe__TQBuTQBsZQCc4NodeZv@Base 12
+ _D3std11concurrency__T4ListTSQBbQBa7MessageZQw8removeAtMFSQCeQCd__TQBtTQBrZQCb5RangeZv@Base 12
+ _D3std11concurrency__T5_sendTSQBcQBb3TidZQtFEQBrQBq7MsgTypeQBeQBhZv@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg11parallelism8taskPoolFNdNeZ4poolCQCrQBl8TaskPoolZQCnFNcLQyOC4core4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg11parallelism8taskPoolFNdNeZ4poolCQCrQBl8TaskPoolZQCnFNcLQyOC4core4sync5mutex5MutexZQBz@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg11parallelism8taskPoolFNdNeZ4poolCQCrQBl8TaskPoolZQCnFNcLQyZQBb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental6logger4core22stdSharedDefaultLoggerCQDiQCcQBrQBn6LoggerZQDiFNcLQBcOCQCk4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental6logger4core22stdSharedDefaultLoggerCQDiQCcQBrQBn6LoggerZQDiFNcLQBcOCQCk4sync5mutex5MutexZQCc@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental6logger4core22stdSharedDefaultLoggerCQDiQCcQBrQBn6LoggerZQDiFNcLQBcZQBg@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental9allocator17_processAllocatorOSQDcQBwQBl18RCISharedAllocatorZQDmFNcLOQBnOC4core4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental9allocator17_processAllocatorOSQDcQBwQBl18RCISharedAllocatorZQDmFNcLOQBnZOQBs@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg12experimental9allocator17_processAllocatorOSQDcQBwQBl18RCISharedAllocatorZQDmFNcNfLOQBpOC4core4sync5mutex5MutexZOQCs@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg3net4curl7CurlAPI7_handlePvZQBrFNcLQkOC4core4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg3net4curl7CurlAPI7_handlePvZQBrFNcLQkOC4core4sync5mutex5MutexZQBl@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg3net4curl7CurlAPI7_handlePvZQBrFNcLQkZQn@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8datetime8timezone9LocalTime9singletonFNeZ5guardObZQCoFNcLObOC4core4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8datetime8timezone9LocalTime9singletonFNeZ5guardObZQCoFNcLObZOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8datetime8timezone9LocalTime9singletonFNeZ5guardObZQCoFNcNfLObOC4core4sync5mutex5MutexZOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8encoding14EncodingScheme6createFAyaZ11initializedObZQCqFNcLObOC4core4sync5mutex5MutexZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8encoding14EncodingScheme6createFAyaZ11initializedObZQCqFNcLObZOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBg8encoding14EncodingScheme6createFAyaZ11initializedObZQCqFNcNfLObOC4core4sync5mutex5MutexZOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBgQBf12registryLockFNdZ4implC4core4sync5mutex5MutexZQCnFNcLQBfOCQBjQBhQBfQBcZ4flagOb@Base 12
+ _D3std11concurrency__T8initOnceS_DQBgQBf12registryLockFNdZ4implC4core4sync5mutex5MutexZQCnFNcLQBfOCQBjQBhQBfQBcZQBx@Base 12
+ _D3std11concurrency__T8initOnceS_DQBgQBf12registryLockFNdZ4implC4core4sync5mutex5MutexZQCnFNcLQBfZQBj@Base 12
+ _D3std11mathspecial11__moduleRefZ@Base 12
+ _D3std11mathspecial11logmdigammaFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial12__ModuleInfoZ@Base 12
+ _D3std11mathspecial14betaIncompleteFNaNbNiNfeeeZe@Base 12
+ _D3std11mathspecial15gammaIncompleteFNaNbNiNfeeZe@Base 12
+ _D3std11mathspecial18logmdigammaInverseFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial18normalDistributionFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial20gammaIncompleteComplFNaNbNiNfeeZe@Base 12
+ _D3std11mathspecial21betaIncompleteInverseFNaNbNiNfeeeZe@Base 12
+ _D3std11mathspecial25normalDistributionInverseFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial27gammaIncompleteComplInverseFNaNbNiNfeeZe@Base 12
+ _D3std11mathspecial3erfFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial4betaFNaNbNiNfeeZe@Base 12
+ _D3std11mathspecial4erfcFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial5gammaFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial7digammaFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial8logGammaFNaNbNiNfeZe@Base 12
+ _D3std11mathspecial8sgnGammaFNaNbNiNfeZe@Base 12
+ _D3std11parallelism10foreachErrFZv@Base 12
+ _D3std11parallelism11__moduleRefZ@Base 12
+ _D3std11parallelism12AbstractTask11__xopEqualsMxFKxSQByQBxQBnZb@Base 12
+ _D3std11parallelism12AbstractTask3jobMFZv@Base 12
+ _D3std11parallelism12AbstractTask4doneMFNdZb@Base 12
+ _D3std11parallelism12AbstractTask6__initZ@Base 12
+ _D3std11parallelism12AbstractTask9__xtoHashFNbNeKxSQBxQBwQBmZm@Base 12
+ _D3std11parallelism12__ModuleInfoZ@Base 12
+ _D3std11parallelism13totalCPUsImplFNbNiNeZk@Base 12
+ _D3std11parallelism16submitAndExecuteFCQBlQBk8TaskPoolMDFZvZv@Base 12
+ _D3std11parallelism17ParallelismThread6__ctorMFDFZvZCQBzQByQBo@Base 12
+ _D3std11parallelism17ParallelismThread6__initZ@Base 12
+ _D3std11parallelism17ParallelismThread6__vtblZ@Base 12
+ _D3std11parallelism17ParallelismThread7__ClassZ@Base 12
+ _D3std11parallelism17cacheLineSizeImplFNbNiNeZm@Base 12
+ _D3std11parallelism18defaultPoolThreadsFNdNeZk@Base 12
+ _D3std11parallelism18defaultPoolThreadsFNdNekZv@Base 12
+ _D3std11parallelism19_defaultPoolThreadsOk@Base 12
+ _D3std11parallelism20ParallelForeachError6__ctorMFZCQByQBxQBn@Base 12
+ _D3std11parallelism20ParallelForeachError6__initZ@Base 12
+ _D3std11parallelism20ParallelForeachError6__vtblZ@Base 12
+ _D3std11parallelism20ParallelForeachError7__ClassZ@Base 12
+ _D3std11parallelism26_sharedStaticDtor_L1074_C1FZv@Base 12
+ _D3std11parallelism8TaskPool10deleteItemMFPSQBqQBp12AbstractTaskZb@Base 12
+ _D3std11parallelism8TaskPool10waiterLockMFZv@Base 12
+ _D3std11parallelism8TaskPool11abstractPutMFPSQBrQBq12AbstractTaskZv@Base 12
+ _D3std11parallelism8TaskPool11queueUnlockMFZv@Base 12
+ _D3std11parallelism8TaskPool11threadIndexm@Base 12
+ _D3std11parallelism8TaskPool11workerIndexMxFNbNdNfZm@Base 12
+ _D3std11parallelism8TaskPool12doSingleTaskMFZv@Base 12
+ _D3std11parallelism8TaskPool12waiterUnlockMFZv@Base 12
+ _D3std11parallelism8TaskPool13notifyWaitersMFZv@Base 12
+ _D3std11parallelism8TaskPool13startWorkLoopMFZv@Base 12
+ _D3std11parallelism8TaskPool15executeWorkLoopMFZv@Base 12
+ _D3std11parallelism8TaskPool16deleteItemNoSyncMFPSQBwQBv12AbstractTaskZb@Base 12
+ _D3std11parallelism8TaskPool16tryDeleteExecuteMFPSQBwQBv12AbstractTaskZv@Base 12
+ _D3std11parallelism8TaskPool17abstractPutNoSyncMFPSQBxQBw12AbstractTaskZv@Base 12
+ _D3std11parallelism8TaskPool17nextInstanceIndexm@Base 12
+ _D3std11parallelism8TaskPool19defaultWorkUnitSizeMxFNaNbNfmZm@Base 12
+ _D3std11parallelism8TaskPool19waitUntilCompletionMFZv@Base 12
+ _D3std11parallelism8TaskPool22abstractPutGroupNoSyncMFPSQCcQCb12AbstractTaskQwZv@Base 12
+ _D3std11parallelism8TaskPool3popMFZPSQBjQBi12AbstractTask@Base 12
+ _D3std11parallelism8TaskPool4sizeMxFNaNbNdNfZm@Base 12
+ _D3std11parallelism8TaskPool4stopMFNeZv@Base 12
+ _D3std11parallelism8TaskPool4waitMFZv@Base 12
+ _D3std11parallelism8TaskPool5doJobMFPSQBkQBj12AbstractTaskZv@Base 12
+ _D3std11parallelism8TaskPool6__ctorMFNeZCQBnQBmQBc@Base 12
+ _D3std11parallelism8TaskPool6__ctorMFNemZCQBoQBnQBd@Base 12
+ _D3std11parallelism8TaskPool6__ctorMFPSQBlQBk12AbstractTaskiZCQCiQChQBx@Base 12
+ _D3std11parallelism8TaskPool6__initZ@Base 12
+ _D3std11parallelism8TaskPool6__vtblZ@Base 12
+ _D3std11parallelism8TaskPool6finishMFNebZv@Base 12
+ _D3std11parallelism8TaskPool6notifyMFZv@Base 12
+ _D3std11parallelism8TaskPool7__ClassZ@Base 12
+ _D3std11parallelism8TaskPool8isDaemonMFNdNeZb@Base 12
+ _D3std11parallelism8TaskPool8isDaemonMFNdNebZv@Base 12
+ _D3std11parallelism8TaskPool8priorityMFNdNeZi@Base 12
+ _D3std11parallelism8TaskPool8priorityMFNdNeiZv@Base 12
+ _D3std11parallelism8TaskPool9notifyAllMFZv@Base 12
+ _D3std11parallelism8TaskPool9popNoSyncMFZPSQBpQBo12AbstractTask@Base 12
+ _D3std11parallelism8TaskPool9queueLockMFZv@Base 12
+ _D3std11parallelism8taskPoolFNdNeZ4poolCQBmQBl8TaskPool@Base 12
+ _D3std11parallelism8taskPoolFNdNeZ9__lambda2FNfZCQBvQBu8TaskPool@Base 12
+ _D3std11parallelism8taskPoolFNdNeZCQBhQBg8TaskPool@Base 12
+ _D3std11parallelism__T10scopedTaskTDFZvZQsFMQjZSQBuQBt__T4TaskSQCjQCi3runTQBnZQv@Base 12
+ _D3std11parallelism__T14atomicCasUbyteTEQBmQBl8TaskPool9PoolStateZQBsFNaNbNiKQBmQBpQBsZb@Base 12
+ _D3std11parallelism__T14atomicSetUbyteTEQBmQBl8TaskPool9PoolStateZQBsFNaNbNiKQBmQBpZv@Base 12
+ _D3std11parallelism__T14atomicSetUbyteThZQtFNaNbNiKhhZv@Base 12
+ _D3std11parallelism__T15atomicReadUbyteTEQBnQBm8TaskPool9PoolStateZQBtFNaNbNiKQBmZh@Base 12
+ _D3std11parallelism__T15atomicReadUbyteThZQuFNaNbNiKhZh@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTykVki4294967295S_DQCqQCp13totalCPUsImplFNbNiNeZkZQDbFNaNbNdNiNfZyk@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTykVki4294967295S_DQCqQCp13totalCPUsImplFNbNiNeZkZQDbFNaNdZ4implFNbNiNfZyk@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTykVki4294967295S_DQCqQCp13totalCPUsImplFNbNiNeZkZQDbFNaNdZ4implFNbZ3tlsk@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTykVki4294967295S_DQCqQCp13totalCPUsImplFNbNiNeZkZQDbFNaNdZ4implFNbZ6resultOk@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTymVmN1S_DQChQCg17cacheLineSizeImplFNbNiNeZmZQCwFNaNbNdNiNfZym@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTymVmN1S_DQChQCg17cacheLineSizeImplFNbNiNeZmZQCwFNaNdZ4implFNbNiNfZym@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTymVmN1S_DQChQCg17cacheLineSizeImplFNbNiNeZmZQCwFNaNdZ4implFNbZ3tlsm@Base 12
+ _D3std11parallelism__T27__lazilyInitializedConstantTymVmN1S_DQChQCg17cacheLineSizeImplFNbNiNeZmZQCwFNaNdZ4implFNbZ6resultOm@Base 12
+ _D3std11parallelism__T3runTDFZvZQkFQiZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv10yieldForceMFNcNdNeZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv11__xopEqualsMxFKxSQCkQCj__TQBzSQCxQCwQByTQBxZQCrZb@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv11enforcePoolMFNaNfZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv18executeInNewThreadMFNeZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv18executeInNewThreadMFNeiZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv4doneMFNdNeZb@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv4implFPvZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv6__ctorMFNaNbNcNiNfQBaZSQCpQCo__TQCeSQDcQDbQCdTQCcZQCw@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv6__dtorMFNfZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv6__initZ@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv7basePtrMFNaNbNdNiNfZPSQCoQCn12AbstractTask@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv8opAssignMFNfSQCfQCe__TQBuSQCsQCrQBtTQBsZQCmZQBg@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv9__xtoHashFNbNeKxSQCjQCi__TQBySQCwQCvQBxTQBwZQCqZm@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv9spinForceMFNcNdNeZv@Base 12
+ _D3std11parallelism__T4TaskSQBaQz3runTDFZvZQv9workForceMFNcNdNeZv@Base 12
+ _D3std12__ModuleInfoZ@Base 12
+ _D3std12experimental10checkedint11__moduleRefZ@Base 12
+ _D3std12experimental10checkedint12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger10logMsgPartMFNfMAxaZv@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger11__fieldDtorMFNeZv@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger11beginLogMsgMFNfAyaiQeQgQiEQCzQCyQCn4core8LogLevelSQDx11concurrency3TidSQEs8datetime7systime7SysTimeCQFvQFuQFjQCw6LoggerZv@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger11getFilenameMFZAya@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger11writeLogMsgMFNfKSQCqQCpQCe4core6Logger8LogEntryZv@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger12finishLogMsgMFNfZv@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger4fileMFNdNfZSQCk5stdio4File@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger6__ctorMFNfSQCj5stdio4FilexEQCzQCyQCn4core8LogLevelZCQDyQDxQDmQDiQCz@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger6__ctorMFNfxAyaxEQCoQCnQCc4core8LogLevelEQDm8typecons__T4FlagVAyaa12_437265617465466f6c646572ZQBmZCQFsQFrQFgQFcQEt@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger6__ctorMFNfxAyaxEQCoQCnQCc4core8LogLevelZCQDnQDmQDbQCxQCo@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger6__initZ@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger6__vtblZ@Base 12
+ _D3std12experimental6logger10filelogger10FileLogger7__ClassZ@Base 12
+ _D3std12experimental6logger10filelogger11__moduleRefZ@Base 12
+ _D3std12experimental6logger10filelogger12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger10nulllogger10NullLogger11writeLogMsgMFNiNfKSQCsQCrQCg4core6Logger8LogEntryZv@Base 12
+ _D3std12experimental6logger10nulllogger10NullLogger6__ctorMFNfxEQCkQCjQBy4core8LogLevelZCQDjQDiQCxQCtQCk@Base 12
+ _D3std12experimental6logger10nulllogger10NullLogger6__initZ@Base 12
+ _D3std12experimental6logger10nulllogger10NullLogger6__vtblZ@Base 12
+ _D3std12experimental6logger10nulllogger10NullLogger7__ClassZ@Base 12
+ _D3std12experimental6logger10nulllogger11__moduleRefZ@Base 12
+ _D3std12experimental6logger10nulllogger12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger11__moduleRefZ@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger11writeLogMsgMFNfKSQCsQCrQCg4core6Logger8LogEntryZv@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger12insertLoggerMFNfAyaCQCvQCuQCj4core6LoggerZv@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger12removeLoggerMFNfIAaZCQCwQCvQCk4core6Logger@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger6__ctorMFNfxEQCmQClQCa4core8LogLevelZCQDlQDkQCzQCvQCl@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger6__initZ@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger6__vtblZ@Base 12
+ _D3std12experimental6logger11multilogger11MultiLogger7__ClassZ@Base 12
+ _D3std12experimental6logger11multilogger11__moduleRefZ@Base 12
+ _D3std12experimental6logger11multilogger12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger11multilogger16MultiLoggerEntry11__xopEqualsMxFKxSQCxQCwQClQChQBxZb@Base 12
+ _D3std12experimental6logger11multilogger16MultiLoggerEntry6__initZ@Base 12
+ _D3std12experimental6logger11multilogger16MultiLoggerEntry9__xtoHashFNbNeKxSQCwQCvQCkQCgQBwZm@Base 12
+ _D3std12experimental6logger12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger4core10TestLogger11writeLogMsgMFNfKSQCjQCiQBxQBt6Logger8LogEntryZv@Base 12
+ _D3std12experimental6logger4core10TestLogger6__ctorMFNfxEQCdQCcQBrQBn8LogLevelZCQDaQCzQCoQCkQCi@Base 12
+ _D3std12experimental6logger4core10TestLogger6__initZ@Base 12
+ _D3std12experimental6logger4core10TestLogger6__vtblZ@Base 12
+ _D3std12experimental6logger4core10TestLogger7__ClassZ@Base 12
+ _D3std12experimental6logger4core11__moduleRefZ@Base 12
+ _D3std12experimental6logger4core12__ModuleInfoZ@Base 12
+ _D3std12experimental6logger4core14globalLogLevelFNdNfEQCaQBzQBoQBk8LogLevelZv@Base 12
+ _D3std12experimental6logger4core14globalLogLevelFNdNiNfZEQCdQCcQBrQBn8LogLevel@Base 12
+ _D3std12experimental6logger4core15stdSharedLoggerOCQBxQBwQBlQBh6Logger@Base 12
+ _D3std12experimental6logger4core16StdForwardLogger11writeLogMsgMFNfKSQCpQCoQCdQBz6Logger8LogEntryZv@Base 12
+ _D3std12experimental6logger4core16StdForwardLogger6__ctorMFNfxEQCjQCiQBxQBt8LogLevelZCQDgQDfQCuQCqQCo@Base 12
+ _D3std12experimental6logger4core16StdForwardLogger6__initZ@Base 12
+ _D3std12experimental6logger4core16StdForwardLogger6__vtblZ@Base 12
+ _D3std12experimental6logger4core16StdForwardLogger7__ClassZ@Base 12
+ _D3std12experimental6logger4core17stdThreadLocalLogFNdNfCQCdQCcQBrQBn6LoggerZv@Base 12
+ _D3std12experimental6logger4core17stdThreadLocalLogFNdNfZCQCeQCdQBsQBo6Logger@Base 12
+ _D3std12experimental6logger4core21stdLoggerThreadLoggerCQCcQCbQBqQBm6Logger@Base 12
+ _D3std12experimental6logger4core21stdThreadLocalLogImplFNdNeZ7_bufferG23Pv@Base 12
+ _D3std12experimental6logger4core21stdThreadLocalLogImplFNdNeZCQCiQChQBwQBs6Logger@Base 12
+ _D3std12experimental6logger4core22stdSharedDefaultLoggerCQCdQCcQBrQBn6Logger@Base 12
+ _D3std12experimental6logger4core23defaultSharedLoggerImplFNdNeZ7_bufferG224v@Base 12
+ _D3std12experimental6logger4core23defaultSharedLoggerImplFNdNeZ9__lambda2FZCQCwQCvQCk10filelogger10FileLogger@Base 12
+ _D3std12experimental6logger4core23defaultSharedLoggerImplFNdNeZCQCkQCjQByQBu6Logger@Base 12
+ _D3std12experimental6logger4core23stdLoggerGlobalLogLevelOEQCfQCeQBtQBp8LogLevel@Base 12
+ _D3std12experimental6logger4core28stdLoggerDefaultThreadLoggerCQCjQCiQBxQBt6Logger@Base 12
+ _D3std12experimental6logger4core6Logger10forwardMsgMFNeKSQCdQCcQBrQBnQBl8LogEntryZv@Base 12
+ _D3std12experimental6logger4core6Logger10logMsgPartMFNfMAxaZv@Base 12
+ _D3std12experimental6logger4core6Logger11beginLogMsgMFNfAyaiQeQgQiEQCnQCmQCbQBx8LogLevelSQDj11concurrency3TidSQEe8datetime7systime7SysTimeCQFhQFgQEvQErQEpZv@Base 12
+ _D3std12experimental6logger4core6Logger12fatalHandlerMFNdNiNfDFNfZvZv@Base 12
+ _D3std12experimental6logger4core6Logger12fatalHandlerMFNdNiNfZDFZv@Base 12
+ _D3std12experimental6logger4core6Logger12finishLogMsgMFNfZv@Base 12
+ _D3std12experimental6logger4core6Logger6__ctorMFNfEQBxQBwQBlQBh8LogLevelZCQCuQCtQCiQCeQCc@Base 12
+ _D3std12experimental6logger4core6Logger6__initZ@Base 12
+ _D3std12experimental6logger4core6Logger6__vtblZ@Base 12
+ _D3std12experimental6logger4core6Logger7__ClassZ@Base 12
+ _D3std12experimental6logger4core6Logger8LogEntry11__xopEqualsMxFKxSQCnQCmQCbQBxQBvQBrZb@Base 12
+ _D3std12experimental6logger4core6Logger8LogEntry6__initZ@Base 12
+ _D3std12experimental6logger4core6Logger8LogEntry8opAssignMFNaNbNcNiNjNfSQCsQCrQCgQCcQCaQBwZQu@Base 12
+ _D3std12experimental6logger4core6Logger8LogEntry9__xtoHashFNbNeKxSQCmQClQCaQBwQBuQBqZm@Base 12
+ _D3std12experimental6logger4core6Logger8logLevelMFNdNiNfxEQCeQCdQBsQBo8LogLevelZv@Base 12
+ _D3std12experimental6logger4core6Logger8logLevelMxFNaNdNiNfZEQChQCgQBvQBr8LogLevel@Base 12
+ _D3std12experimental6logger4core8LogLevel6__initZ@Base 12
+ _D3std12experimental6logger4core8MsgRange11__xopEqualsMxFKxSQCgQCfQBuQBqQBoZb@Base 12
+ _D3std12experimental6logger4core8MsgRange3putMFNfwZv@Base 12
+ _D3std12experimental6logger4core8MsgRange6__ctorMFNcNfCQCbQCaQBpQBl6LoggerZSQCwQCvQCkQCgQCe@Base 12
+ _D3std12experimental6logger4core8MsgRange6__initZ@Base 12
+ _D3std12experimental6logger4core8MsgRange9__xtoHashFNbNeKxSQCfQCeQBtQBpQBnZm@Base 12
+ _D3std12experimental6logger4core8parentOfFAyaZQe@Base 12
+ _D3std12experimental6logger4core9sharedLogFNdNeCQBuQBtQBiQBe6LoggerZv@Base 12
+ _D3std12experimental6logger4core9sharedLogFNdNfZ11trustedLoadFNaNbNiNeKOCQCtQCsQChQCd6LoggerZCQDoQDnQDcQCyQv@Base 12
+ _D3std12experimental6logger4core9sharedLogFNdNfZCQBvQBuQBjQBf6Logger@Base 12
+ _D3std12experimental6logger4core__T11trustedLoadTEQBwQBvQBkQBg8LogLevelZQBlFNaNbNiNeKOEQDhQDgQCvQCrQBlZQCc@Base 12
+ _D3std12experimental6logger4core__T11trustedLoadTxEQBxQBwQBlQBh8LogLevelZQBmFNaNbNiNeKOxEQDjQDiQCxQCtQBmZEQEaQDzQDoQDkQCd@Base 12
+ _D3std12experimental6logger4core__T12trustedStoreTEQBxQBwQBlQBh8LogLevelZQBmFNaNbNiNeKOEQDiQDhQCwQCsQBlKQCcZv@Base 12
+ _D3std12experimental6logger4core__T16isLoggingEnabledZQtFNaNfEQCiQChQBwQBs8LogLevelQwQyLbZb@Base 12
+ _D3std12experimental6logger4core__T18systimeToISOStringTSQCd5stdio4File17LockingTextWriterZQCeFNfQBpKxSQDx8datetime7systime7SysTimeZv@Base 12
+ _D3std12experimental8typecons11__moduleRefZ@Base 12
+ _D3std12experimental8typecons12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator10IAllocator11__InterfaceZ@Base 12
+ _D3std12experimental9allocator10mallocator10Mallocator10deallocateMOxFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator10mallocator10Mallocator10reallocateMOxFNaNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator10mallocator10Mallocator6__initZ@Base 12
+ _D3std12experimental9allocator10mallocator10Mallocator8allocateMOxFNaNbNiNemZAv@Base 12
+ _D3std12experimental9allocator10mallocator10Mallocator8instanceOSQClQCkQBzQBsQBj@Base 12
+ _D3std12experimental9allocator10mallocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator10mallocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator10deallocateMOFNbNiAvZb@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator10reallocateMOFNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator15alignedAllocateMOFNbNiNemkZAv@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator17alignedReallocateMOFNbNiKAvmkZb@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator6__initZ@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator8allocateMOFNbNiNemZAv@Base 12
+ _D3std12experimental9allocator10mallocator17AlignedMallocator8instanceOSQCsQCrQCgQBzQBq@Base 12
+ _D3std12experimental9allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator12RCIAllocator10__postblitMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator12RCIAllocator10deallocateMFNbAvZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator10reallocateMFNbKAvmZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator11__xopEqualsMxFKxSQCjQCiQBxQBqZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator11allocateAllMFNbZAv@Base 12
+ _D3std12experimental9allocator12RCIAllocator13deallocateAllMFNbZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator13goodAllocSizeMFNbmZm@Base 12
+ _D3std12experimental9allocator12RCIAllocator15alignedAllocateMFNbmkZAv@Base 12
+ _D3std12experimental9allocator12RCIAllocator17alignedReallocateMFNbKAvmkZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator22resolveInternalPointerMFNbxPvKAvZSQDa8typecons7Ternary@Base 12
+ _D3std12experimental9allocator12RCIAllocator4ownsMFNbAvZSQCd8typecons7Ternary@Base 12
+ _D3std12experimental9allocator12RCIAllocator5emptyMFNbZSQCc8typecons7Ternary@Base 12
+ _D3std12experimental9allocator12RCIAllocator6__dtorMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator12RCIAllocator6__initZ@Base 12
+ _D3std12experimental9allocator12RCIAllocator6expandMFNbKAvmZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator8allocateMFNbmC8TypeInfoZAv@Base 12
+ _D3std12experimental9allocator12RCIAllocator9__xtoHashFNbNeKxSQCiQChQBwQBpZm@Base 12
+ _D3std12experimental9allocator12RCIAllocator9alignmentMFNbNdZk@Base 12
+ _D3std12experimental9allocator12RCIAllocator__T6__ctorTSQCcQCbQBqQBjZQwMFNaNbNcNiNfCQDeQDdQCs10IAllocatorZQBz@Base 12
+ _D3std12experimental9allocator12RCIAllocator__T6isNullTSQCcQCbQBqQBjZQwMFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator12RCIAllocator__T8opAssignZQkMFNaNbNcNiNfSQCsQCrQCgQBzZQo@Base 12
+ _D3std12experimental9allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator10deallocateMOxFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator10reallocateMOxFNaNbKAvmZb@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator13goodAllocSizeMOxFNaNbNiNfmZm@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator22resolveInternalPointerMOxFNaNbNiNexPvKAvZSQDv8typecons7Ternary@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator6__initZ@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator6expandMOxFNaNbNeKAvmZb@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator7collectMOxFNbNeZv@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator8allocateMOxFNaNbNemZAv@Base 12
+ _D3std12experimental9allocator12gc_allocator11GCAllocator8instanceOxSQCpQCoQCdQBwQBl@Base 12
+ _D3std12experimental9allocator12gc_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator12gc_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator12theAllocatorFNbNcNdNiNfZSQCdQCcQBr12RCIAllocator@Base 12
+ _D3std12experimental9allocator12theAllocatorFNbNdNiSQByQBxQBm12RCIAllocatorZv@Base 12
+ _D3std12experimental9allocator14mmap_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator14mmap_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator14mmap_allocator13MmapAllocator10deallocateMOxFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator14mmap_allocator13MmapAllocator6__initZ@Base 12
+ _D3std12experimental9allocator14mmap_allocator13MmapAllocator8allocateMOxFNaNbNiNfmZ9__lambda2FNaNbNiNeZi@Base 12
+ _D3std12experimental9allocator14mmap_allocator13MmapAllocator8allocateMOxFNaNbNiNfmZAv@Base 12
+ _D3std12experimental9allocator14mmap_allocator13MmapAllocator8instanceOxSQCtQCsQChQCaQBn@Base 12
+ _D3std12experimental9allocator15building_blocks10bucketizer11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks10bucketizer12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks10segregator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks10segregator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe10deallocateMFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe11__xopEqualsMxFKxSQHmQHlQHaQGtQGf__TQFsTQFgTQDpZQGeZb@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe12addAllocatorMFNaNbNimZPSQHtQHsQHhQHaQGm__TQFzTQFnTQDwZQGl4Node@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe13deallocateAllMFNaNbNiZb@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe14moveAllocatorsMFNaNbNiAvZv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe15alignedAllocateMFNaNbNimkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node11__fieldDtorMFNaNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node6unusedMxFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node8opAssignMFNaNbNcNiNjSQHuQHtQHiQHbQGn__TQGaTQFoTQDxZQGmQCiZQBm@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4Node9setUnusedMFNaNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4makeMFNaNbNimZSQHjQHiQGxQGq6region__T6RegionTSQIoQInQIc14mmap_allocator13MmapAllocatorVki16VEQKj8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4ownsMFNaNbNiNfAvZSQHm8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe5emptyMxFNaNbNiNfZSQHm8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6__ctorMFNaNbNcNiNfKQElZSQHsQHrQHgQGzQGl__TQFyTQFmTQDvZQGk@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6__ctorMFNaNbNcNiNfQEkZSQHrQHqQHfQGyQGk__TQFxTQFlTQDuZQGj@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6__dtorMFNaNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe6expandMFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe8allocateMFNaNbNimZAv@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe8opAssignMFNaNbNcNiNjSQHpQHoQHdQGwQGi__TQFvTQFjTQDsZQGhZQBj@Base 12
+ _D3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe9__xtoHashFNbNeKxSQHlQHkQGzQGsQGe__TQFrTQFfTQDoZQGdZm@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator10deallocateMOFNaNbNiNfAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator10reallocateMOFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator11allocateAllMOFNaNbNiNfZAv@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator13deallocateAllMOFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator15alignedAllocateMOFNaNbNiNfmkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator17alignedReallocateMOFNaNbNiNfKAvmkZb@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator22resolveInternalPointerMOxFNaNbNiNfxPvKAvZSQEq8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator4ownsMOxFNaNbNiNfxAvZSQDu8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator5emptyMOxFNaNbNiNfZSQDs8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator6expandMOFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator8allocateMOFNaNbNiNfmZAv@Base 12
+ _D3std12experimental9allocator15building_blocks14null_allocator13NullAllocator8instanceOSQDjQDiQCxQCqQCcQBp@Base 12
+ _D3std12experimental9allocator15building_blocks15affix_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks15affix_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block11leadingOnesFNaNbNiNfmZk@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block13setBitsIfZeroFNaNbNiNfKmkkZb@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block14findContigOnesFNaNbNiNfmkZk@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block7setBitsFNaNbNiNfKmkkZv@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector11__xopEqualsMxFKxSQDnQDmQDbQCuQCgQBsZb@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector13find1BackwardMFNaNbNiNfmZm@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector13opIndexAssignMFNaNbNiNfbmZv@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector13opSliceAssignMFNaNbNiNfbZv@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector13opSliceAssignMFNaNbNiNfbmmZv@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector5find1MFNaNbNiNfmZm@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector6__ctorMFNaNbNcNiNfAmZSQDrQDqQDfQCyQCkQBw@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector6lengthMxFNaNbNiNfZm@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector7allAre0MxFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector7allAre1MxFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector7opIndexMFNaNbNiNfmZb@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector9__xtoHashFNbNeKxSQDmQDlQDaQCtQCfQBrZm@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9BitVector9findZerosMFNaNbNiNfymmZm@Base 12
+ _D3std12experimental9allocator15building_blocks15bitmapped_block9resetBitsFNaNbNiNfKmkkZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector7Options6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq10deallocateMFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq10reallocateMFNaNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq11__fieldDtorMFNaNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq13deallocateAllMFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq15alignedAllocateMFNaNbNiNfmkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq4ownsMFNaNbNiNfAvZSQJzQDu7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq5emptyMFNaNbNiNfZSQJyQDt7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq6defineFNaNbNfQCuAQCyXQDc@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq6expandMFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq8allocateMFNaNbNiNfmZAv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq8opAssignMFNaNbNcNiNjSQKcQKbQJqQJjQIv__TQIhTQHuVmi4096Vmi0ZQJaZQBq@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq9bytesUsedMxFNaNbNiNfZxm@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVki0VQDfa13_6e756d4465616c6c6f63617465VQEna15_6279746573436f6e74726163746564ZQDqMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVki0VQDfa13_6e756d5265616c6c6f63617465VQEna15_6e756d5265616c6c6f636174654f4bVQFza20_6e756d5265616c6c6f63617465496e506c616365VQHva13_62797465734e6f744d6f766564VQJda13_6279746573457870616e646564VQKla15_6279746573436f6e74726163746564VQLxa10_62797465734d6f766564ZQKqMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVki0VQDfa16_6e756d4465616c6c6f63617465416c6cZQCkMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVki0VQDfa7_6e756d4f776e73ZQBrMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVki0VQDfa9_6e756d457870616e64VQEea11_6e756d457870616e644f4bVQFia13_6279746573457870616e646564VQGqa14_6279746573416c6c6f6361746564ZQFrMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVmi0VQDfa11_6e756d416c6c6f63617465VQEja13_6e756d416c6c6f636174654f4bVQFra14_6279746573416c6c6f6361746564ZQEsMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10addPerCallVQCwnVmi0VQDfa18_6e756d416c69676e6564416c6c6f63617465VQExa20_6e756d416c69676e6564416c6c6f636174654f6bVQGta14_6279746573416c6c6f6361746564ZQFuMFNaNbNiNfAmXv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T10expandImplVnnVii0ZQuMFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T12allocateImplVnnVii0ZQwMFNaNbNiNfmZAv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T14deallocateImplVnnVii0ZQyMFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T14reallocateImplVnnVii0ZQyMFNaNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T17deallocateAllImplVnnVii0ZQBbMFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T19alignedAllocateImplVnnVii0ZQBdMFNaNbNiNfmkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna11_6e756d416c6c6f63617465ZQBiMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna11_6e756d457870616e644f4bZQBiMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna13_6e756d4465616c6c6f63617465ZQBmMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna13_6e756d5265616c6c6f63617465ZQBmMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna15_6e756d5265616c6c6f636174654f4bZQBqMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna16_6e756d4465616c6c6f63617465416c6cZQBsMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna18_6e756d416c69676e6564416c6c6f63617465ZQBwMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna20_6e756d5265616c6c6f63617465496e506c616365ZQCaMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna7_6e756d4f776e73ZQzMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T2upVQCna9_6e756d457870616e64ZQBdMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa10_62797465734d6f766564ZQBhMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa10_6279746573536c61636bZQBhMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa11_6e756d416c6c6f63617465ZQBjMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa11_6e756d457870616e644f4bZQBjMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa13_6279746573457870616e646564ZQBnMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa13_62797465734e6f744d6f766564ZQBnMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa13_6e756d416c6c6f636174654f4bZQBnMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa13_6e756d4465616c6c6f63617465ZQBnMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa13_6e756d5265616c6c6f63617465ZQBnMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa14_6279746573416c6c6f6361746564ZQBpMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa15_6279746573436f6e74726163746564ZQBrMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa15_6e756d5265616c6c6f636174654f4bZQBrMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa16_6e756d4465616c6c6f63617465416c6cZQBtMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa18_6e756d416c69676e6564416c6c6f63617465ZQBxMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa20_6e756d416c69676e6564416c6c6f636174654f6bZQCbMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa20_6e756d5265616c6c6f63617465496e506c616365ZQCbMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa7_6e756d4f776e73ZQBaMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa9_627974657355736564ZQBeMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T3addVQCoa9_6e756d457870616e64ZQBeMFNaNbNiNflZv@Base 12
+ _D3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGq__T8ownsImplVnnVii0ZQrMFNaNbNiNfAvZSQKqQEl7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks16scoped_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks16scoped_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks17kernighan_ritchie11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks17kernighan_ritchie12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks18aligned_block_list11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks18aligned_block_list12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks18fallback_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks18fallback_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator15alignedAllocateMFNbNimkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator5emptyMFNbNiZSQEf8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator6__dtorMFNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator6expandMFNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator8allocateMFNbNimZAv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator8opAssignMFNbNcNiNjSQElQEkQDzQDsQDeQChZQu@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin1510deallocateMFNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin1513deallocateAllMFNbNiZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin1513goodAllocSizeMFNbNimZm@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin1516getAvailableSizeMFNbNiZm@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin1522extendMemoryProtectionMFNbNiPvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin154ownsMFNbNiAvZSQEq8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator22AscendingPageAllocator9__mixin156__ctorMFNbNcNimZSQEtQEsQEhQEaQDmQCp@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator12allocateImplMOFNbNimkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator15alignedAllocateMOFNbNimkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator6expandMOFNbNiKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator8allocateMOFNbNimZAv@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin1510deallocateMOFNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin1513deallocateAllMOFNbNiZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin1513goodAllocSizeMOFNbNimZm@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin1516getAvailableSizeMOFNbNiZm@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin1522extendMemoryProtectionMOFNbNiPvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin154ownsMOFNbNiAvZSQEx8typecons7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks24ascending_page_allocator28SharedAscendingPageAllocator9__mixin156__ctorMOFNbNcNimZOSQFbQFaQEpQEiQDuQCx@Base 12
+ _D3std12experimental9allocator15building_blocks6region11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks6region12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn10deallocateMFNaNbNiAvZb@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn10roundedEndMxFNaNbNiNeZPv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn11allocateAllMFNaNbNiNeZAv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn12roundedBeginMxFNaNbNiNeZPv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn13deallocateAllMFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn13goodAllocSizeMxFNaNbNiNfmZm@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn15alignedAllocateMFNaNbNiNemkZAv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn4ownsMxFNaNbNiNexAvZSQHoQDh7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn5emptyMxFNaNbNiNfZSQHmQDf7Ternary@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6__ctorMFNaNbNcNiAhZSQHoQHnQHcQGvQGh__TQGdTQFzVki16VQEni0ZQGw@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6__ctorMFNaNbNcNimZSQHnQHmQHbQGuQGg__TQGcTQFyVki16VQEmi0ZQGv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6__dtorMFNaNbNiZv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6__initZ@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn6expandMFNaNbNiNfKAvmZb@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn8allocateMFNaNbNiNemZAv@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn8opAssignMFNaNbNcNiNjSQHpQHoQHdQGwQGi__TQGeTQGaVki16VQEoi0ZQGxZQBq@Base 12
+ _D3std12experimental9allocator15building_blocks6region__T6RegionTSQCmQClQCa14mmap_allocator13MmapAllocatorVki16VEQEh8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn9availableMxFNaNbNiNfZm@Base 12
+ _D3std12experimental9allocator15building_blocks9free_list11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks9free_list12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks9free_tree11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks9free_tree12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator15building_blocks9quantizer11__moduleRefZ@Base 12
+ _D3std12experimental9allocator15building_blocks9quantizer12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator16ISharedAllocator11__InterfaceZ@Base 12
+ _D3std12experimental9allocator16_threadAllocatorSQBvQBuQBj12RCIAllocator@Base 12
+ _D3std12experimental9allocator16processAllocatorFNbNcNdNiNeZ15forceAttributesFZPOSQDcQDbQCq18RCISharedAllocator@Base 12
+ _D3std12experimental9allocator16processAllocatorFNbNcNdNiNeZOSQCiQChQBw18RCISharedAllocator@Base 12
+ _D3std12experimental9allocator16processAllocatorFNbNdNiKOSQCeQCdQBs18RCISharedAllocatorZv@Base 12
+ _D3std12experimental9allocator17_processAllocatorOSQBxQBwQBl18RCISharedAllocator@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator10__postblitMOFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator10deallocateMOFNbAvZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator10reallocateMOFNbKAvmZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator11__xopEqualsMxFKOxSQCqQCpQCeQBxZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator11allocateAllMOFNbZAv@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator13deallocateAllMOFNbZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator13goodAllocSizeMOFNbmZm@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator15alignedAllocateMOFNbmkZAv@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator17alignedReallocateMOFNbKAvmkZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator22resolveInternalPointerMOFNbxPvKAvZSQDh8typecons7Ternary@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator4ownsMOFNbAvZSQCk8typecons7Ternary@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator5emptyMOFNbZSQCj8typecons7Ternary@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator6__ctorMOFNaNbNcNiNfOCQCsQCrQCg16ISharedAllocatorZOSQDwQDvQDkQDd@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator6__dtorMOFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator6__initZ@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator6expandMOFNbKAvmZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator8allocateMOFNbmC8TypeInfoZAv@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator9__xtoHashFNbNeKOxSQCpQCoQCdQBwZm@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator9alignmentMOFNbNdZk@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator__T6isNullTOSQCjQCiQBxQBqZQxMOFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator18RCISharedAllocator__T8opAssignZQkMOFNaNbNcNiNfOSQDaQCzQCoQChZOQp@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator10deallocateMFNbAvZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator10reallocateMFNbKAvmZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator11allocateAllMFNbZAv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator13deallocateAllMFNbZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator13goodAllocSizeMFNbmZm@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator15alignedAllocateMFNbmkZAv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator17alignedReallocateMFNbKAvmkZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator22resolveInternalPointerMFNbxPvKAvZSQEj8typecons7Ternary@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator4ownsMFNbAvZSQDm8typecons7Ternary@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator5emptyMFNbZSQDl8typecons7Ternary@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6__ctorMFNaNbNiNfKOSQDtQDsQDh18RCISharedAllocatorZCQEyQExQEmQEfFNbNcNiNfZQDm@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6__initZ@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6__vtblZ@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6decRefMFNaNbNiNfZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6expandMFNbKAvmZb@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6incRefMFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator7__ClassZ@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator8allocateMFNbmC8TypeInfoZAv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator9alignmentMFNbNdZk@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ21_threadAllocatorStateG4m@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ9__lambda3FNbNiNeZv@Base 12
+ _D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZSQCjQCiQBx12RCIAllocator@Base 12
+ _D3std12experimental9allocator5typed11__moduleRefZ@Base 12
+ _D3std12experimental9allocator5typed12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator6common11__moduleRefZ@Base 12
+ _D3std12experimental9allocator6common11alignDownToFNaNbNiNkMPvkZQe@Base 12
+ _D3std12experimental9allocator6common12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator6common13divideRoundUpFNaNbNiNfmmZm@Base 12
+ _D3std12experimental9allocator6common13trailingZerosFNaNbNiNfmZk@Base 12
+ _D3std12experimental9allocator6common15forwardToMemberFAyaAQeXQh@Base 12
+ _D3std12experimental9allocator6common17roundUpToPowerOf2FNaNbNiNfmZm@Base 12
+ _D3std12experimental9allocator6common18effectiveAlignmentFNaNbNiPvZm@Base 12
+ _D3std12experimental9allocator6common18roundUpToAlignmentFNaNbNiAvkZQe@Base 12
+ _D3std12experimental9allocator6common18roundUpToAlignmentFNaNbNiNfmkZm@Base 12
+ _D3std12experimental9allocator6common19roundUpToMultipleOfFNaNbNiNfmkZm@Base 12
+ _D3std12experimental9allocator6common20roundDownToAlignmentFNaNbNiNfmkZm@Base 12
+ _D3std12experimental9allocator6common21isGoodStaticAlignmentFNaNbNiNfkZb@Base 12
+ _D3std12experimental9allocator6common22isGoodDynamicAlignmentFNaNbNiNfkZb@Base 12
+ _D3std12experimental9allocator6common22roundStartToMultipleOfFNaNbNiAvkZQe@Base 12
+ _D3std12experimental9allocator6common9alignUpToFNaNbNiNkMPvkZQe@Base 12
+ _D3std12experimental9allocator6common__T10reallocateTSQCaQBzQBo15building_blocks6region__T6RegionTSQDtQDsQDh14mmap_allocator13MmapAllocatorVki16VEQFo8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnZQGpFNaNbNiKQGnKAvmZb@Base 12
+ _D3std12experimental9allocator6common__T13goodAllocSizeTSQCdQCcQBr15building_blocks15stats_collector__T14StatsCollectorTSQEpQEoQEdQCm6region__T6RegionTSQFuQFtQFi14mmap_allocator13MmapAllocatorVki16VEQHp8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqZQJfFNaNbNiNfKQJcmZm@Base 12
+ _D3std12experimental9allocator6common__T9alignedAtTvZQnFNaNbNiNfPvkZb@Base 12
+ _D3std12experimental9allocator8showcase11__moduleRefZ@Base 12
+ _D3std12experimental9allocator8showcase12__ModuleInfoZ@Base 12
+ _D3std12experimental9allocator8showcase14mmapRegionListFmZ7Factory6__ctorMFNcmZSQDaQCzQCoQChQCbFmZQBo@Base 12
+ _D3std12experimental9allocator8showcase14mmapRegionListFmZ7Factory6__initZ@Base 12
+ _D3std12experimental9allocator8showcase14mmapRegionListFmZ7Factory6opCallMFNaNbNimZSQDeQDdQCs15building_blocks6region__T6RegionTSQExQEwQEl14mmap_allocator13MmapAllocatorVki16VEQGs8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEn@Base 12
+ _D3std12experimental9allocator8showcase14mmapRegionListFmZSQCfQCeQBt15building_blocks14allocator_list__T13AllocatorListTSQEpQEoQEdQDwQDqFmZ7FactoryTSQFrQFqQFfQDm14null_allocator13NullAllocatorZQDl@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk10deallocateMOFNbAvZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk10reallocateMOFNbKAvmZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk11allocateAllMOFNbZAv@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk13deallocateAllMOFNbZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk13goodAllocSizeMOFNbmZm@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk15alignedAllocateMOFNbmkZAv@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk17alignedReallocateMOFNbKAvmkZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk22resolveInternalPointerMOFNbxPvKAvZSQHdQDm7Ternary@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk4ownsMOFNbAvZSQGgQCp7Ternary@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk5emptyMOFNbZSQGfQCo7Ternary@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6__initZ@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6__vtblZ@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6decRefMOFNaNbNiNeZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6expandMOFNbKAvmZb@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6incRefMOFNaNbNiNfZv@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk7__ClassZ@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk8allocateMOFNbmC8TypeInfoZAv@Base 12
+ _D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk9alignmentMOFNbNdZk@Base 12
+ _D3std12experimental9allocator__T21sharedAllocatorObjectTOxSQCgQCfQBu12gc_allocator11GCAllocatorZQCmFNbKOxQBvZ5stateOG3m@Base 12
+ _D3std12experimental9allocator__T21sharedAllocatorObjectTOxSQCgQCfQBu12gc_allocator11GCAllocatorZQCmFNbKOxQBvZ6resultOSQEnQEmQEb18RCISharedAllocator@Base 12
+ _D3std12experimental9allocator__T21sharedAllocatorObjectTOxSQCgQCfQBu12gc_allocator11GCAllocatorZQCmFNbNiKOxQBxZOSQEiQEhQDw18RCISharedAllocator@Base 12
+ _D3std3csv11__moduleRefZ@Base 12
+ _D3std3csv12CSVException6__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQCnQCmQCl@Base 12
+ _D3std3csv12CSVException6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCnQCmQCl@Base 12
+ _D3std3csv12CSVException6__ctorMFNaNbNiNfAyammC6object9ThrowableQxmZCQCpQCoQCn@Base 12
+ _D3std3csv12CSVException6__initZ@Base 12
+ _D3std3csv12CSVException6__vtblZ@Base 12
+ _D3std3csv12CSVException7__ClassZ@Base 12
+ _D3std3csv12CSVException8toStringMxFNaNfZAya@Base 12
+ _D3std3csv12__ModuleInfoZ@Base 12
+ _D3std3csv23HeaderMismatchException6__initZ@Base 12
+ _D3std3csv23HeaderMismatchException6__vtblZ@Base 12
+ _D3std3csv23HeaderMismatchException7__ClassZ@Base 12
+ _D3std3csv23HeaderMismatchException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDhQDgQDf@Base 12
+ _D3std3csv23HeaderMismatchException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDhQDgQDf@Base 12
+ _D3std3csv23IncompleteCellException6__initZ@Base 12
+ _D3std3csv23IncompleteCellException6__vtblZ@Base 12
+ _D3std3csv23IncompleteCellException7__ClassZ@Base 12
+ _D3std3csv23IncompleteCellException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDhQDgQDf@Base 12
+ _D3std3csv23IncompleteCellException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDhQDgQDf@Base 12
+ _D3std3net4curl11__moduleRefZ@Base 12
+ _D3std3net4curl12AutoProtocol6__initZ@Base 12
+ _D3std3net4curl12__ModuleInfoZ@Base 12
+ _D3std3net4curl13CurlException6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCrQCqQCpQCn@Base 12
+ _D3std3net4curl13CurlException6__initZ@Base 12
+ _D3std3net4curl13CurlException6__vtblZ@Base 12
+ _D3std3net4curl13CurlException7__ClassZ@Base 12
+ _D3std3net4curl19HTTPStatusException6__ctorMFNaNbNfiAyaQdmC6object9ThrowableZCQCyQCxQCwQCu@Base 12
+ _D3std3net4curl19HTTPStatusException6__initZ@Base 12
+ _D3std3net4curl19HTTPStatusException6__vtblZ@Base 12
+ _D3std3net4curl19HTTPStatusException7__ClassZ@Base 12
+ _D3std3net4curl20CurlTimeoutException6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCyQCxQCwQCu@Base 12
+ _D3std3net4curl20CurlTimeoutException6__initZ@Base 12
+ _D3std3net4curl20CurlTimeoutException6__vtblZ@Base 12
+ _D3std3net4curl20CurlTimeoutException7__ClassZ@Base 12
+ _D3std3net4curl3FTP10addCommandMFAxaZv@Base 12
+ _D3std3net4curl3FTP10initializeMFZv@Base 12
+ _D3std3net4curl3FTP11__fieldDtorMFZv@Base 12
+ _D3std3net4curl3FTP13clearCommandsMFZv@Base 12
+ _D3std3net4curl3FTP13contentLengthMFNdmZv@Base 12
+ _D3std3net4curl3FTP15__fieldPostblitMFNaNbNiNlZv@Base 12
+ _D3std3net4curl3FTP3dupMFZSQzQxQvQs@Base 12
+ _D3std3net4curl3FTP3urlMFNdAxaZv@Base 12
+ _D3std3net4curl3FTP4Impl11__xopEqualsMxFKxSQBpQBoQBnQBlQBkZb@Base 12
+ _D3std3net4curl3FTP4Impl6__dtorMFZv@Base 12
+ _D3std3net4curl3FTP4Impl6__initZ@Base 12
+ _D3std3net4curl3FTP4Impl8opAssignMFNcNjSQBmQBlQBkQBiQBhZQr@Base 12
+ _D3std3net4curl3FTP4Impl9__xtoHashFNbNeKxSQBoQBnQBmQBkQBjZm@Base 12
+ _D3std3net4curl3FTP6__initZ@Base 12
+ _D3std3net4curl3FTP6opCallFAxaZSQBeQBdQBcQBa@Base 12
+ _D3std3net4curl3FTP6opCallFZSQBbQBaQzQw@Base 12
+ _D3std3net4curl3FTP7performMFEQBc8typecons__T4FlagVAyaa12_7468726f774f6e4572726f72ZQBmZi@Base 12
+ _D3std3net4curl3FTP8encodingMFNdAyaZv@Base 12
+ _D3std3net4curl3FTP8encodingMFNdZAya@Base 12
+ _D3std3net4curl3FTP8opAssignMFNcNjSQBhQBgQBfQBdZQo@Base 12
+ _D3std3net4curl3FTP9__mixin1910dnsTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl3FTP9__mixin1910onProgressMFNdDFmmmmZiZv@Base 12
+ _D3std3net4curl3FTP9__mixin1910setNoProxyMFAyaZv@Base 12
+ _D3std3net4curl3FTP9__mixin1910tcpNoDelayMFNdbZv@Base 12
+ _D3std3net4curl3FTP9__mixin1910verifyHostMFNdbZv@Base 12
+ _D3std3net4curl3FTP9__mixin1910verifyPeerMFNdbZv@Base 12
+ _D3std3net4curl3FTP9__mixin1911dataTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl3FTP9__mixin1912netInterfaceMFNdAxaZv@Base 12
+ _D3std3net4curl3FTP9__mixin1912netInterfaceMFNdCQBu6socket15InternetAddressZv@Base 12
+ _D3std3net4curl3FTP9__mixin1912netInterfaceMFNdxG4hZv@Base 12
+ _D3std3net4curl3FTP9__mixin1914connectTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl3FTP9__mixin1914localPortRangeMFNdtZv@Base 12
+ _D3std3net4curl3FTP9__mixin1916operationTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl3FTP9__mixin1917setAuthenticationMFAxaQdQfZv@Base 12
+ _D3std3net4curl3FTP9__mixin1922setProxyAuthenticationMFAxaQdZv@Base 12
+ _D3std3net4curl3FTP9__mixin1928defaultAsyncStringBufferSizek@Base 12
+ _D3std3net4curl3FTP9__mixin195proxyMFNdAxaZv@Base 12
+ _D3std3net4curl3FTP9__mixin196handleMFNcNdNjZSQBsQBrQBq4Curl@Base 12
+ _D3std3net4curl3FTP9__mixin196onSendMFNdDFAvZmZv@Base 12
+ _D3std3net4curl3FTP9__mixin197verboseMFNdbZv@Base 12
+ _D3std3net4curl3FTP9__mixin198shutdownMFZv@Base 12
+ _D3std3net4curl3FTP9__mixin199isStoppedMFNdZb@Base 12
+ _D3std3net4curl3FTP9__mixin199localPortMFNdtZv@Base 12
+ _D3std3net4curl3FTP9__mixin199onReceiveMFNdDFAhZmZv@Base 12
+ _D3std3net4curl3FTP9__mixin199proxyPortMFNdtZv@Base 12
+ _D3std3net4curl3FTP9__mixin199proxyTypeMFNdE3etc1cQBo9CurlProxyZv@Base 12
+ _D3std3net4curl3FTP9getTimingMFE3etc1cQBc8CurlInfoKdZi@Base 12
+ _D3std3net4curl4Curl10initializeMFZv@Base 12
+ _D3std3net4curl4Curl10onProgressMFNdDFmmmmZiZv@Base 12
+ _D3std3net4curl4Curl11errorStringMFiZAya@Base 12
+ _D3std3net4curl4Curl13_seekCallbackUPvliZi@Base 12
+ _D3std3net4curl4Curl13_sendCallbackUPammPvZm@Base 12
+ _D3std3net4curl4Curl14onSocketOptionMFNdDFEQBp6socket8socket_tE3etc1cQCh12CurlSockTypeZiZv@Base 12
+ _D3std3net4curl4Curl14throwOnStoppedMFAyaZv@Base 12
+ _D3std3net4curl4Curl15onReceiveHeaderMFNdDFIAaZvZv@Base 12
+ _D3std3net4curl4Curl16_receiveCallbackUxPammPvZm@Base 12
+ _D3std3net4curl4Curl16clearIfSupportedMFE3etc1cQBl10CurlOptionZv@Base 12
+ _D3std3net4curl4Curl17_progressCallbackUPvddddZi@Base 12
+ _D3std3net4curl4Curl21_socketOptionCallbackUPvEQBt6socket8socket_tiZi@Base 12
+ _D3std3net4curl4Curl22_receiveHeaderCallbackUxPammPvZm@Base 12
+ _D3std3net4curl4Curl3dupMFZSQBaQzQxQu@Base 12
+ _D3std3net4curl4Curl3setMFE3etc1cQx10CurlOptionAxaZv@Base 12
+ _D3std3net4curl4Curl3setMFE3etc1cQx10CurlOptionPvZv@Base 12
+ _D3std3net4curl4Curl3setMFE3etc1cQx10CurlOptionlZv@Base 12
+ _D3std3net4curl4Curl5clearMFE3etc1cQz10CurlOptionZv@Base 12
+ _D3std3net4curl4Curl5pauseMFbbZv@Base 12
+ _D3std3net4curl4Curl6__initZ@Base 12
+ _D3std3net4curl4Curl6_checkMFiZv@Base 12
+ _D3std3net4curl4Curl6onSeekMFNdDFlE3etc1cQBf11CurlSeekPosZEQyQwQCb8CurlSeekZv@Base 12
+ _D3std3net4curl4Curl6onSendMFNdDFAvZmZv@Base 12
+ _D3std3net4curl4Curl7performMFEQBd8typecons__T4FlagVAyaa12_7468726f774f6e4572726f72ZQBmZi@Base 12
+ _D3std3net4curl4Curl7stoppedMxFNdZb@Base 12
+ _D3std3net4curl4Curl8shutdownMFZv@Base 12
+ _D3std3net4curl4Curl9getTimingMFE3etc1cQBd8CurlInfoKdZi@Base 12
+ _D3std3net4curl4Curl9onReceiveMFNdDFAhZmZv@Base 12
+ _D3std3net4curl4CurlQkFNcNdZSQBbQBaQz7CurlAPI3API@Base 12
+ _D3std3net4curl4HTTP10StatusLine11__xopEqualsMxFKxSQBxQBwQBvQBtQBrZb@Base 12
+ _D3std3net4curl4HTTP10StatusLine5resetMFNfZv@Base 12
+ _D3std3net4curl4HTTP10StatusLine6__initZ@Base 12
+ _D3std3net4curl4HTTP10StatusLine8toStringMxFZAya@Base 12
+ _D3std3net4curl4HTTP10StatusLine9__xtoHashFNbNeKxSQBwQBvQBuQBsQBqZm@Base 12
+ _D3std3net4curl4HTTP10initializeMFZv@Base 12
+ _D3std3net4curl4HTTP10statusLineMFNdZSQBkQBjQBiQBg10StatusLine@Base 12
+ _D3std3net4curl4HTTP11__fieldDtorMFZv@Base 12
+ _D3std3net4curl4HTTP11setPostDataMFAxvAyaZv@Base 12
+ _D3std3net4curl4HTTP12maxRedirectsMFNdkZv@Base 12
+ _D3std3net4curl4HTTP12setCookieJarMFAxaZv@Base 12
+ _D3std3net4curl4HTTP12setUserAgentMFAxaZv@Base 12
+ _D3std3net4curl4HTTP13contentLengthMFNdmZv@Base 12
+ _D3std3net4curl4HTTP14flushCookieJarMFZv@Base 12
+ _D3std3net4curl4HTTP15__fieldPostblitMFNaNbNiNlZv@Base 12
+ _D3std3net4curl4HTTP15clearAllCookiesMFZv@Base 12
+ _D3std3net4curl4HTTP15onReceiveHeaderMFNdDFIAaIQdZvZv@Base 12
+ _D3std3net4curl4HTTP15parseStatusLineFNfxAaJSQBrQBqQBpQBn10StatusLineZb@Base 12
+ _D3std3net4curl4HTTP15responseHeadersMFNdZHAyaQd@Base 12
+ _D3std3net4curl4HTTP16addRequestHeaderMFAxaQdZv@Base 12
+ _D3std3net4curl4HTTP16defaultUserAgentFNdZ3bufG63a@Base 12
+ _D3std3net4curl4HTTP16defaultUserAgentFNdZ9userAgentAya@Base 12
+ _D3std3net4curl4HTTP16defaultUserAgentFNdZAya@Base 12
+ _D3std3net4curl4HTTP16setTimeConditionMFE3etc1cQBl12CurlTimeCondSQCl8datetime7systime7SysTimeZv@Base 12
+ _D3std3net4curl4HTTP19clearRequestHeadersMFZv@Base 12
+ _D3std3net4curl4HTTP19clearSessionCookiesMFZv@Base 12
+ _D3std3net4curl4HTTP19defaultMaxRedirectsk@Base 12
+ _D3std3net4curl4HTTP19onReceiveStatusLineMFNdDFSQBuQBtQBsQBq10StatusLineZvZv@Base 12
+ _D3std3net4curl4HTTP20authenticationMethodMFNdE3etc1cQBr8CurlAuthZv@Base 12
+ _D3std3net4curl4HTTP3dupMFZSQBaQzQxQu@Base 12
+ _D3std3net4curl4HTTP3urlMFNdAxaZv@Base 12
+ _D3std3net4curl4HTTP4Impl11__xopEqualsMxFKxSQBqQBpQBoQBmQBkZb@Base 12
+ _D3std3net4curl4HTTP4Impl15onReceiveHeaderMFNdDFIAaIQdZvZv@Base 12
+ _D3std3net4curl4HTTP4Impl6__dtorMFZv@Base 12
+ _D3std3net4curl4HTTP4Impl6__initZ@Base 12
+ _D3std3net4curl4HTTP4Impl8opAssignMFNcNjSQBnQBmQBlQBjQBhZQr@Base 12
+ _D3std3net4curl4HTTP4Impl9__xtoHashFNbNeKxSQBpQBoQBnQBlQBjZm@Base 12
+ _D3std3net4curl4HTTP6__initZ@Base 12
+ _D3std3net4curl4HTTP6caInfoMFNdAxaZv@Base 12
+ _D3std3net4curl4HTTP6methodMFNdEQBeQBdQBcQBa6MethodZv@Base 12
+ _D3std3net4curl4HTTP6methodMFNdZEQBfQBeQBdQBb6Method@Base 12
+ _D3std3net4curl4HTTP6opCallFAxaZSQBfQBeQBdQBb@Base 12
+ _D3std3net4curl4HTTP6opCallFZSQBcQBbQBaQy@Base 12
+ _D3std3net4curl4HTTP7performMFEQBd8typecons__T4FlagVAyaa12_7468726f774f6e4572726f72ZQBmZi@Base 12
+ _D3std3net4curl4HTTP8opAssignMFNcNjSQBiQBhQBgQBeZQo@Base 12
+ _D3std3net4curl4HTTP8postDataMFNdAxaZv@Base 12
+ _D3std3net4curl4HTTP8postDataMFNdAxvZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610dnsTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610onProgressMFNdDFmmmmZiZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610setNoProxyMFAyaZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610tcpNoDelayMFNdbZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610verifyHostMFNdbZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4610verifyPeerMFNdbZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4611dataTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4612netInterfaceMFNdAxaZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4612netInterfaceMFNdCQBv6socket15InternetAddressZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4612netInterfaceMFNdxG4hZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4614connectTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4614localPortRangeMFNdtZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4616operationTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4617setAuthenticationMFAxaQdQfZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4622setProxyAuthenticationMFAxaQdZv@Base 12
+ _D3std3net4curl4HTTP9__mixin4628defaultAsyncStringBufferSizek@Base 12
+ _D3std3net4curl4HTTP9__mixin465proxyMFNdAxaZv@Base 12
+ _D3std3net4curl4HTTP9__mixin466handleMFNcNdNjZSQBtQBsQBr4Curl@Base 12
+ _D3std3net4curl4HTTP9__mixin466onSendMFNdDFAvZmZv@Base 12
+ _D3std3net4curl4HTTP9__mixin467verboseMFNdbZv@Base 12
+ _D3std3net4curl4HTTP9__mixin468shutdownMFZv@Base 12
+ _D3std3net4curl4HTTP9__mixin469isStoppedMFNdZb@Base 12
+ _D3std3net4curl4HTTP9__mixin469localPortMFNdtZv@Base 12
+ _D3std3net4curl4HTTP9__mixin469onReceiveMFNdDFAhZmZv@Base 12
+ _D3std3net4curl4HTTP9__mixin469proxyPortMFNdtZv@Base 12
+ _D3std3net4curl4HTTP9__mixin469proxyTypeMFNdE3etc1cQBp9CurlProxyZv@Base 12
+ _D3std3net4curl4HTTP9getTimingMFE3etc1cQBd8CurlInfoKdZi@Base 12
+ _D3std3net4curl4HTTP9setCookieMFAxaZv@Base 12
+ _D3std3net4curl4SMTP10initializeMFZv@Base 12
+ _D3std3net4curl4SMTP11__fieldDtorMFZv@Base 12
+ _D3std3net4curl4SMTP15__fieldPostblitMFNaNbNiNlZv@Base 12
+ _D3std3net4curl4SMTP3urlMFNdAxaZv@Base 12
+ _D3std3net4curl4SMTP4Impl6__dtorMFZv@Base 12
+ _D3std3net4curl4SMTP4Impl6__initZ@Base 12
+ _D3std3net4curl4SMTP4Impl7messageMFNdAyaZv@Base 12
+ _D3std3net4curl4SMTP4Impl8opAssignMFNcNjSQBnQBmQBlQBjQBhZQr@Base 12
+ _D3std3net4curl4SMTP6__initZ@Base 12
+ _D3std3net4curl4SMTP6opCallFAxaZSQBfQBeQBdQBb@Base 12
+ _D3std3net4curl4SMTP6opCallFZSQBcQBbQBaQy@Base 12
+ _D3std3net4curl4SMTP7messageMFNdAyaZv@Base 12
+ _D3std3net4curl4SMTP7performMFEQBd8typecons__T4FlagVAyaa12_7468726f774f6e4572726f72ZQBmZi@Base 12
+ _D3std3net4curl4SMTP8opAssignMFNcNjSQBiQBhQBgQBeZQo@Base 12
+ _D3std3net4curl4SMTP9__mixin1310dnsTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1310onProgressMFNdDFmmmmZiZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1310setNoProxyMFAyaZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1310tcpNoDelayMFNdbZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1310verifyHostMFNdbZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1310verifyPeerMFNdbZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1311dataTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1312netInterfaceMFNdAxaZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1312netInterfaceMFNdCQBv6socket15InternetAddressZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1312netInterfaceMFNdxG4hZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1314connectTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1314localPortRangeMFNdtZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1316operationTimeoutMFNdS4core4time8DurationZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1317setAuthenticationMFAxaQdQfZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1322setProxyAuthenticationMFAxaQdZv@Base 12
+ _D3std3net4curl4SMTP9__mixin1328defaultAsyncStringBufferSizek@Base 12
+ _D3std3net4curl4SMTP9__mixin135proxyMFNdAxaZv@Base 12
+ _D3std3net4curl4SMTP9__mixin136handleMFNcNdNjZSQBtQBsQBr4Curl@Base 12
+ _D3std3net4curl4SMTP9__mixin136onSendMFNdDFAvZmZv@Base 12
+ _D3std3net4curl4SMTP9__mixin137verboseMFNdbZv@Base 12
+ _D3std3net4curl4SMTP9__mixin138shutdownMFZv@Base 12
+ _D3std3net4curl4SMTP9__mixin139isStoppedMFNdZb@Base 12
+ _D3std3net4curl4SMTP9__mixin139localPortMFNdtZv@Base 12
+ _D3std3net4curl4SMTP9__mixin139onReceiveMFNdDFAhZmZv@Base 12
+ _D3std3net4curl4SMTP9__mixin139proxyPortMFNdtZv@Base 12
+ _D3std3net4curl4SMTP9__mixin139proxyTypeMFNdE3etc1cQBp9CurlProxyZv@Base 12
+ _D3std3net4curl7CurlAPI3API6__initZ@Base 12
+ _D3std3net4curl7CurlAPI4_apiSQBbQBaQzQw3API@Base 12
+ _D3std3net4curl7CurlAPI6__initZ@Base 12
+ _D3std3net4curl7CurlAPI7_handlePv@Base 12
+ _D3std3net4curl7CurlAPI7loadAPIFZ5namesyAAa@Base 12
+ _D3std3net4curl7CurlAPI7loadAPIFZ7cleanupUZv@Base 12
+ _D3std3net4curl7CurlAPI7loadAPIFZPv@Base 12
+ _D3std3net4curl7CurlAPI8instanceFNcNdZSQBlQBkQBjQBh3API@Base 12
+ _D3std3net4curl8isFTPUrlFAxaZb@Base 12
+ _D3std3net7isemail10AsciiToken6__initZ@Base 12
+ _D3std3net7isemail11EmailStatus10domainPartMxFNaNbNdNiNjNfZAya@Base 12
+ _D3std3net7isemail11EmailStatus10statusCodeMxFNaNbNdNiNlNfZEQCgQCfQCe15EmailStatusCode@Base 12
+ _D3std3net7isemail11EmailStatus11__xopEqualsMxFKxSQBwQBvQBuQBpZb@Base 12
+ _D3std3net7isemail11EmailStatus5validMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3net7isemail11EmailStatus6__ctorMFNaNbNcNiNfbAyaQdEQCdQCcQCb15EmailStatusCodeZSQDfQDeQDdQCy@Base 12
+ _D3std3net7isemail11EmailStatus6__initZ@Base 12
+ _D3std3net7isemail11EmailStatus6statusMxFNaNbNdNiNlNfZAya@Base 12
+ _D3std3net7isemail11EmailStatus8toStringMxFNaNlNfZAya@Base 12
+ _D3std3net7isemail11EmailStatus9__xtoHashFNbNeKxSQBvQBuQBtQBoZm@Base 12
+ _D3std3net7isemail11EmailStatus9localPartMxFNaNbNdNiNjNfZAya@Base 12
+ _D3std3net7isemail11__moduleRefZ@Base 12
+ _D3std3net7isemail12__ModuleInfoZ@Base 12
+ _D3std3net7isemail15EmailStatusCode6__initZ@Base 12
+ _D3std3net7isemail21statusCodeDescriptionFNaNbNiNfEQBxQBwQBv15EmailStatusCodeZAya@Base 12
+ _D3std3uni10compressToFNaNbNfkMKAhZv@Base 12
+ _D3std3uni10isAlphaNumFNaNbNiNfwZb@Base 12
+ _D3std3uni10nfkcQCTrieFNaNbNdNiNfZ3resySQBmQBl__T4TrieTSQCcQCb__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDsQDr__T9sliceBitsVmi13Vmi21ZQvTSQFaQEz__TQBiVmi8Vmi13ZQBvTSQGbQGa__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10nfkcQCTrieFNaNbNdNiNfZySQBiQBh__T4TrieTSQByQBx__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDoQDn__T9sliceBitsVmi13Vmi21ZQvTSQEwQEv__TQBiVmi8Vmi13ZQBvTSQFxQFw__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10nfkdQCTrieFNaNbNdNiNfZ3resySQBmQBl__T4TrieTSQCcQCb__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDsQDr__T9sliceBitsVmi13Vmi21ZQvTSQFaQEz__TQBiVmi8Vmi13ZQBvTSQGbQGa__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10nfkdQCTrieFNaNbNdNiNfZySQBiQBh__T4TrieTSQByQBx__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDoQDn__T9sliceBitsVmi13Vmi21ZQvTSQEwQEv__TQBiVmi8Vmi13ZQBvTSQFxQFw__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10numberTrieFNaNbNdNiNfZ3resySQBmQBl__T4TrieTSQCcQCb__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDsQDr__T9sliceBitsVmi13Vmi21ZQvTSQFaQEz__TQBiVmi7Vmi13ZQBvTSQGbQGa__TQCjVmi0Vmi7ZQCvZQFf@Base 12
+ _D3std3uni10numberTrieFNaNbNdNiNfZySQBiQBh__T4TrieTSQByQBx__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDoQDn__T9sliceBitsVmi13Vmi21ZQvTSQEwQEv__TQBiVmi7Vmi13ZQBvTSQFxQFw__TQCjVmi0Vmi7ZQCvZQFf@Base 12
+ _D3std3uni10safeRead24FNaNbNiMxPhmZk@Base 12
+ _D3std3uni10symbolTrieFNaNbNdNiNfZ3resySQBmQBl__T4TrieTSQCcQCb__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDsQDr__T9sliceBitsVmi13Vmi21ZQvTSQFaQEz__TQBiVmi8Vmi13ZQBvTSQGbQGa__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10symbolTrieFNaNbNdNiNfZySQBiQBh__T4TrieTSQByQBx__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDoQDn__T9sliceBitsVmi13Vmi21ZQvTSQEwQEv__TQBiVmi8Vmi13ZQBvTSQFxQFw__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni10toLowerTabFNaNbNiNemZw@Base 12
+ _D3std3uni10toTitleTabFNaNbNiNemZw@Base 12
+ _D3std3uni10toUpperTabFNaNbNiNemZw@Base 12
+ _D3std3uni11__moduleRefZ@Base 12
+ _D3std3uni11caseEncloseFNaNfSQBbQBa__T13InversionListTSQCbQCa8GcPolicyZQBhZQBv@Base 12
+ _D3std3uni11composeJamoFNaNbNiNfwwwZw@Base 12
+ _D3std3uni11isGraphicalFNaNbNiNfwZb@Base 12
+ _D3std3uni11isSurrogateFNaNbNiNfwZb@Base 12
+ _D3std3uni11safeWrite24FNaNbNiMPhkmZv@Base 12
+ _D3std3uni11toTitlecaseFNaNbNiNfwZw@Base 12
+ _D3std3uni12__ModuleInfoZ@Base 12
+ _D3std3uni12fullCaseTrieFNaNbNdNiNfZ3resySQBoQBn__T4TrieTtTwVmi1114112TSQCsQCr__T9sliceBitsVmi13Vmi21ZQvTSQEaQDz__TQBiVmi6Vmi13ZQBvTSQFbQFa__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni12fullCaseTrieFNaNbNdNiNfZySQBkQBj__T4TrieTtTwVmi1114112TSQCoQCn__T9sliceBitsVmi13Vmi21ZQvTSQDwQDv__TQBiVmi6Vmi13ZQBvTSQExQEw__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni12isPow2OrZeroFNaNbNiNfmZb@Base 12
+ _D3std3uni12isPrivateUseFNaNbNiNfwZb@Base 12
+ _D3std3uni12toLowerIndexFNaNbNiNewZt@Base 12
+ _D3std3uni12toTitleIndexFNaNbNiNewZt@Base 12
+ _D3std3uni12toUpperIndexFNaNbNiNewZt@Base 12
+ _D3std3uni13ReallocPolicy6__initZ@Base 12
+ _D3std3uni13ReallocPolicy__T5allocTkZQjFNaNbNiNemZAk@Base 12
+ _D3std3uni13ReallocPolicy__T6appendTkTiZQmFNaNbNiNfKAkiZv@Base 12
+ _D3std3uni13ReallocPolicy__T7destroyTkZQlFNaNbNiNeMKAkZv@Base 12
+ _D3std3uni13ReallocPolicy__T7reallocTkZQlFNaNbNiNeNkMAkmZQe@Base 12
+ _D3std3uni13getUnicodeSetFNfMxAabbZSQBiQBh__T13InversionListTSQCiQCh8GcPolicyZQBh@Base 12
+ _D3std3uni13graphicalTrieFNaNbNdNiNfZ3resySQBpQBo__T4TrieTSQCfQCe__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDvQDu__T9sliceBitsVmi13Vmi21ZQvTSQFdQFc__TQBiVmi8Vmi13ZQBvTSQGeQGd__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni13graphicalTrieFNaNbNdNiNfZySQBlQBk__T4TrieTSQCbQCa__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDrQDq__T9sliceBitsVmi13Vmi21ZQvTSQEzQEy__TQBiVmi8Vmi13ZQBvTSQGaQFz__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni13isPunctuationFNaNbNiNfwZb@Base 12
+ _D3std3uni13isSurrogateHiFNaNbNiNfwZb@Base 12
+ _D3std3uni13isSurrogateLoFNaNbNiNfwZb@Base 12
+ _D3std3uni13lowerCaseTrieFNaNbNdNiNfZ3resySQBpQBo__T4TrieTSQCfQCe__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDvQDu__T9sliceBitsVmi13Vmi21ZQvTSQFdQFc__TQBiVmi9Vmi13ZQBvTSQGeQGd__TQCjVmi0Vmi9ZQCvZQFf@Base 12
+ _D3std3uni13lowerCaseTrieFNaNbNdNiNfZySQBlQBk__T4TrieTSQCbQCa__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDrQDq__T9sliceBitsVmi13Vmi21ZQvTSQEzQEy__TQBiVmi9Vmi13ZQBvTSQGaQFz__TQCjVmi0Vmi9ZQCvZQFf@Base 12
+ _D3std3uni13upperCaseTrieFNaNbNdNiNfZ3resySQBpQBo__T4TrieTSQCfQCe__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDvQDu__T9sliceBitsVmi13Vmi21ZQvTSQFdQFc__TQBiVmi9Vmi13ZQBvTSQGeQGd__TQCjVmi0Vmi9ZQCvZQFf@Base 12
+ _D3std3uni13upperCaseTrieFNaNbNdNiNfZySQBlQBk__T4TrieTSQCbQCa__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDrQDq__T9sliceBitsVmi13Vmi21ZQvTSQEzQEy__TQBiVmi9Vmi13ZQBvTSQGaQFz__TQCjVmi0Vmi9ZQCvZQFf@Base 12
+ _D3std3uni13wordCharacterFNdNfZSQBeQBd__T13InversionListTSQCeQCd8GcPolicyZQBh@Base 12
+ _D3std3uni14MatcherConcept6__initZ@Base 12
+ _D3std3uni14combiningClassFNaNbNiNfwZh@Base 12
+ _D3std3uni14decompressFromFNaNfMAxhKmZk@Base 12
+ _D3std3uni14isNonCharacterFNaNbNiNfwZb@Base 12
+ _D3std3uni14simpleCaseTrieFNaNbNdNiNfZ3resySQBqQBp__T4TrieTtTwVmi1114112TSQCuQCt__T9sliceBitsVmi13Vmi21ZQvTSQEcQEb__TQBiVmi6Vmi13ZQBvTSQFdQFc__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni14simpleCaseTrieFNaNbNdNiNfZySQBmQBl__T4TrieTtTwVmi1114112TSQCqQCp__T9sliceBitsVmi13Vmi21ZQvTSQDyQDx__TQBiVmi6Vmi13ZQBvTSQEzQEy__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni14toLowerInPlaceFNaNfKAaZv@Base 12
+ _D3std3uni14toLowerInPlaceFNaNfKAuZv@Base 12
+ _D3std3uni14toLowerInPlaceFNaNfKAwZv@Base 12
+ _D3std3uni14toUpperInPlaceFNaNfKAaZv@Base 12
+ _D3std3uni14toUpperInPlaceFNaNfKAuZv@Base 12
+ _D3std3uni14toUpperInPlaceFNaNfKAwZv@Base 12
+ _D3std3uni15decomposeHangulFNfwZSQBfQBe8Grapheme@Base 12
+ _D3std3uni15hangulRecomposeFNaNbNiNfMAwZv@Base 12
+ _D3std3uni15punctuationTrieFNaNbNdNiNfZ3resySQBrQBq__T4TrieTSQChQCg__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDxQDw__T9sliceBitsVmi13Vmi21ZQvTSQFfQFe__TQBiVmi8Vmi13ZQBvTSQGgQGf__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni15punctuationTrieFNaNbNdNiNfZySQBnQBm__T4TrieTSQCdQCc__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDtQDs__T9sliceBitsVmi13Vmi21ZQvTSQFbQFa__TQBiVmi8Vmi13ZQBvTSQGcQGb__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni15unalignedRead24FNaNbNiMxPhmZk@Base 12
+ _D3std3uni16canonMappingTrieFNaNbNdNiNfZ3resySQBsQBr__T4TrieTtTwVmi1114112TSQCwQCv__T9sliceBitsVmi13Vmi21ZQvTSQEeQEd__TQBiVmi6Vmi13ZQBvTSQFfQFe__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16canonMappingTrieFNaNbNdNiNfZySQBoQBn__T4TrieTtTwVmi1114112TSQCsQCr__T9sliceBitsVmi13Vmi21ZQvTSQEaQDz__TQBiVmi6Vmi13ZQBvTSQFbQFa__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16nonCharacterTrieFNaNbNdNiNfZ3resySQBsQBr__T4TrieTSQCiQCh__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDyQDx__T9sliceBitsVmi14Vmi21ZQvTSQFgQFf__TQBiVmi10Vmi14ZQBwTSQGiQGh__TQCkVmi6Vmi10ZQCxTSQHjQHi__TQDlVmi0Vmi6ZQDxZQGh@Base 12
+ _D3std3uni16nonCharacterTrieFNaNbNdNiNfZySQBoQBn__T4TrieTSQCeQCd__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDuQDt__T9sliceBitsVmi14Vmi21ZQvTSQFcQFb__TQBiVmi10Vmi14ZQBwTSQGeQGd__TQCkVmi6Vmi10ZQCxTSQHfQHe__TQDlVmi0Vmi6ZQDxZQGh@Base 12
+ _D3std3uni16toLowerIndexTrieFNaNbNdNiNfZ3resySQBsQBr__T4TrieTtTwVmi1114112TSQCwQCv__T9sliceBitsVmi13Vmi21ZQvTSQEeQEd__TQBiVmi6Vmi13ZQBvTSQFfQFe__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16toLowerIndexTrieFNaNbNdNiNfZySQBoQBn__T4TrieTtTwVmi1114112TSQCsQCr__T9sliceBitsVmi13Vmi21ZQvTSQEaQDz__TQBiVmi6Vmi13ZQBvTSQFbQFa__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16toTitleIndexTrieFNaNbNdNiNfZ3resySQBsQBr__T4TrieTtTwVmi1114112TSQCwQCv__T9sliceBitsVmi13Vmi21ZQvTSQEeQEd__TQBiVmi6Vmi13ZQBvTSQFfQFe__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16toTitleIndexTrieFNaNbNdNiNfZySQBoQBn__T4TrieTtTwVmi1114112TSQCsQCr__T9sliceBitsVmi13Vmi21ZQvTSQEaQDz__TQBiVmi6Vmi13ZQBvTSQFbQFa__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16toUpperIndexTrieFNaNbNdNiNfZ3resySQBsQBr__T4TrieTtTwVmi1114112TSQCwQCv__T9sliceBitsVmi13Vmi21ZQvTSQEeQEd__TQBiVmi6Vmi13ZQBvTSQFfQFe__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16toUpperIndexTrieFNaNbNdNiNfZySQBoQBn__T4TrieTtTwVmi1114112TSQCsQCr__T9sliceBitsVmi13Vmi21ZQvTSQEaQDz__TQBiVmi6Vmi13ZQBvTSQFbQFa__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni16unalignedWrite24FNaNbNiMPhkmZv@Base 12
+ _D3std3uni17CodepointInterval11__xopEqualsMxFKxSQBuQBtQBsZb@Base 12
+ _D3std3uni17CodepointInterval1aMNgFNaNbNcNdNiNjNfZNgk@Base 12
+ _D3std3uni17CodepointInterval1bMNgFNaNbNcNdNiNjNfZNgk@Base 12
+ _D3std3uni17CodepointInterval6__ctorMFNaNbNcNiNfkkZSQByQBxQBw@Base 12
+ _D3std3uni17CodepointInterval6__initZ@Base 12
+ _D3std3uni17CodepointInterval__T8opEqualsTxSQBqQBpQBoZQwMxFNaNbNiNfxQzZb@Base 12
+ _D3std3uni17compatMappingTrieFNaNbNdNiNfZ3resySQBtQBs__T4TrieTtTwVmi1114112TSQCxQCw__T9sliceBitsVmi13Vmi21ZQvTSQEfQEe__TQBiVmi5Vmi13ZQBvTSQFgQFf__TQCjVmi0Vmi5ZQCvZQEd@Base 12
+ _D3std3uni17compatMappingTrieFNaNbNdNiNfZySQBpQBo__T4TrieTtTwVmi1114112TSQCtQCs__T9sliceBitsVmi13Vmi21ZQvTSQEbQEa__TQBiVmi5Vmi13ZQBvTSQFcQFb__TQCjVmi0Vmi5ZQCvZQEd@Base 12
+ _D3std3uni18combiningClassTrieFNaNbNdNiNfZ3resySQBuQBt__T4TrieThTwVmi1114112TSQCyQCx__T9sliceBitsVmi13Vmi21ZQvTSQEgQEf__TQBiVmi6Vmi13ZQBvTSQFhQFg__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni18combiningClassTrieFNaNbNdNiNfZySQBqQBp__T4TrieThTwVmi1114112TSQCuQCt__T9sliceBitsVmi13Vmi21ZQvTSQEcQEb__TQBiVmi6Vmi13ZQBvTSQFdQFc__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni18graphemeExtendTrieFNaNbNdNiNfZ3resySQBuQBt__T4TrieTSQCkQCj__T9BitPackedTbVmi1ZQrTwVmi1114112TSQEaQDz__T9sliceBitsVmi13Vmi21ZQvTSQFiQFh__TQBiVmi8Vmi13ZQBvTSQGjQGi__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni18graphemeExtendTrieFNaNbNdNiNfZySQBqQBp__T4TrieTSQCgQCf__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDwQDv__T9sliceBitsVmi13Vmi21ZQvTSQFeQFd__TQBiVmi8Vmi13ZQBvTSQGfQGe__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni18simpleCaseFoldingsFNaNbNiNfwZSQBoQBnQBmFNfwZ5Range@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range5frontMxFNaNbNdNiNfZw@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range6__ctorMFNaNbNcNiNfkkZSQCkQCjQCiFNfwZQBr@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range6__ctorMFNaNbNcNiNfwZSQCjQCiQChFNfwZQBq@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range6__initZ@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range7isSmallMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni18simpleCaseFoldingsFNfwZ5Range8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3uni18toLowerSimpleIndexFNaNbNiNewZt@Base 12
+ _D3std3uni18toTitleSimpleIndexFNaNbNiNewZt@Base 12
+ _D3std3uni18toUpperSimpleIndexFNaNbNiNewZt@Base 12
+ _D3std3uni19compositionJumpTrieFNaNbNdNiNfZ3resySQBvQBu__T4TrieTtTwVmi1114112TSQCzQCy__T9sliceBitsVmi9Vmi21ZQuTSQEgQEf__TQBhVmi0Vmi9ZQBtZQDb@Base 12
+ _D3std3uni19compositionJumpTrieFNaNbNdNiNfZySQBrQBq__T4TrieTtTwVmi1114112TSQCvQCu__T9sliceBitsVmi9Vmi21ZQuTSQEcQEb__TQBhVmi0Vmi9ZQBtZQDb@Base 12
+ _D3std3uni19decompressIntervalsFNaNfAxhZSQBnQBm21DecompressedIntervals@Base 12
+ _D3std3uni19hangulSyllableIndexFNaNbNiNfwZi@Base 12
+ _D3std3uni19isRegionalIndicatorFNaNbNiNfwZb@Base 12
+ _D3std3uni21DecompressedIntervals11__xopEqualsMxFKxSQByQBxQBwZb@Base 12
+ _D3std3uni21DecompressedIntervals4saveMFNaNdNjNfZSQBwQBvQBu@Base 12
+ _D3std3uni21DecompressedIntervals5emptyMxFNaNdNfZb@Base 12
+ _D3std3uni21DecompressedIntervals5frontMFNaNdNfZSQBvQBu17CodepointInterval@Base 12
+ _D3std3uni21DecompressedIntervals6__ctorMFNaNcNfAxhZSQBzQByQBx@Base 12
+ _D3std3uni21DecompressedIntervals6__initZ@Base 12
+ _D3std3uni21DecompressedIntervals8popFrontMFNaNfZv@Base 12
+ _D3std3uni21DecompressedIntervals9__xtoHashFNbNeKxSQBxQBwQBvZm@Base 12
+ _D3std3uni22toLowerSimpleIndexTrieFNaNbNdNiNfZ3resySQByQBx__T4TrieTtTwVmi1114112TSQDcQDb__T9sliceBitsVmi13Vmi21ZQvTSQEkQEj__TQBiVmi6Vmi13ZQBvTSQFlQFk__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni22toLowerSimpleIndexTrieFNaNbNdNiNfZySQBuQBt__T4TrieTtTwVmi1114112TSQCyQCx__T9sliceBitsVmi13Vmi21ZQvTSQEgQEf__TQBiVmi6Vmi13ZQBvTSQFhQFg__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni22toTitleSimpleIndexTrieFNaNbNdNiNfZ3resySQByQBx__T4TrieTtTwVmi1114112TSQDcQDb__T9sliceBitsVmi13Vmi21ZQvTSQEkQEj__TQBiVmi6Vmi13ZQBvTSQFlQFk__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni22toTitleSimpleIndexTrieFNaNbNdNiNfZySQBuQBt__T4TrieTtTwVmi1114112TSQCyQCx__T9sliceBitsVmi13Vmi21ZQvTSQEgQEf__TQBiVmi6Vmi13ZQBvTSQFhQFg__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni22toUpperSimpleIndexTrieFNaNbNdNiNfZ3resySQByQBx__T4TrieTtTwVmi1114112TSQDcQDb__T9sliceBitsVmi13Vmi21ZQvTSQEkQEj__TQBiVmi6Vmi13ZQBvTSQFlQFk__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni22toUpperSimpleIndexTrieFNaNbNdNiNfZySQBuQBt__T4TrieTtTwVmi1114112TSQCyQCx__T9sliceBitsVmi13Vmi21ZQvTSQEgQEf__TQBiVmi6Vmi13ZQBvTSQFhQFg__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni23genUnrolledSwitchSearchFNaNbNfmZAya@Base 12
+ _D3std3uni4icmpFNaNbNiNfAxaQdZi@Base 12
+ _D3std3uni4icmpFNaNbNiNfAxuQdZi@Base 12
+ _D3std3uni4icmpFNaNbNiNfAxwQdZi@Base 12
+ _D3std3uni5asSetFNaNfAxhZSQyQw__T13InversionListTSQBwQBv8GcPolicyZQBh@Base 12
+ _D3std3uni5low_8FNaNbNiNfkZk@Base 12
+ _D3std3uni5sicmpFNaNbNiNfMAxaMQeZi@Base 12
+ _D3std3uni5sicmpFNaNbNiNfMAxuMQeZi@Base 12
+ _D3std3uni5sicmpFNaNbNiNfMAxwMQeZi@Base 12
+ _D3std3uni6hangLVFNaNbNdNiNfZ3resySQBhQBg__T4TrieTSQBxQBw__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDnQDm__T9sliceBitsVmi13Vmi21ZQvTSQEvQEu__TQBiVmi8Vmi13ZQBvTSQFwQFv__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni6hangLVFNaNbNdNiNfZySQBdQBc__T4TrieTSQBtQBs__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDjQDi__T9sliceBitsVmi13Vmi21ZQvTSQErQEq__TQBiVmi8Vmi13ZQBvTSQFsQFr__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni6isMarkFNaNbNiNfwZb@Base 12
+ _D3std3uni6mcTrieFNaNbNdNiNfZ3resySQBhQBg__T4TrieTSQBxQBw__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDnQDm__T9sliceBitsVmi13Vmi21ZQvTSQEvQEu__TQBiVmi8Vmi13ZQBvTSQFwQFv__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni6mcTrieFNaNbNdNiNfZySQBdQBc__T4TrieTSQBtQBs__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDjQDi__T9sliceBitsVmi13Vmi21ZQvTSQErQEq__TQBiVmi8Vmi13ZQBvTSQFsQFr__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni6read24FNaNbNiMxPhmZk@Base 12
+ _D3std3uni7composeFNaNbNfwwZw@Base 12
+ _D3std3uni7hangLVTFNaNbNdNiNfZ3resySQBiQBh__T4TrieTSQByQBx__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDoQDn__T9sliceBitsVmi13Vmi21ZQvTSQEwQEv__TQBiVmi8Vmi13ZQBvTSQFxQFw__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni7hangLVTFNaNbNdNiNfZySQBeQBd__T4TrieTSQBuQBt__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDkQDj__T9sliceBitsVmi13Vmi21ZQvTSQEsQEr__TQBiVmi8Vmi13ZQBvTSQFtQFs__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni7isAlphaFNaNbNiNfwZb@Base 12
+ _D3std3uni7isJamoLFNaNbNiNfwZb@Base 12
+ _D3std3uni7isJamoTFNaNbNiNfwZb@Base 12
+ _D3std3uni7isJamoVFNaNbNiNfwZb@Base 12
+ _D3std3uni7isLowerFNaNbNiNfwZb@Base 12
+ _D3std3uni7isSpaceFNaNbNiNfwZb@Base 12
+ _D3std3uni7isUpperFNaNbNiNfwZb@Base 12
+ _D3std3uni7isWhiteFNaNbNiNfwZb@Base 12
+ _D3std3uni7toLowerFNaNbNiNfwZw@Base 12
+ _D3std3uni7toLowerFNaNfNkMAyaZQe@Base 12
+ _D3std3uni7toLowerFNaNfNkMAyuZQe@Base 12
+ _D3std3uni7toLowerFNaNfNkMAywZQe@Base 12
+ _D3std3uni7toUpperFNaNbNiNfwZw@Base 12
+ _D3std3uni7toUpperFNaNfNkMAyaZQe@Base 12
+ _D3std3uni7toUpperFNaNfNkMAyuZQe@Base 12
+ _D3std3uni7toUpperFNaNfNkMAywZQe@Base 12
+ _D3std3uni7unicode18hangulSyllableType6__initZ@Base 12
+ _D3std3uni7unicode5block6__initZ@Base 12
+ _D3std3uni7unicode6__initZ@Base 12
+ _D3std3uni7unicode6script6__initZ@Base 12
+ _D3std3uni7unicode7findAnyFNfAyaZb@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa10_416c7068616265746963ZQBpFNaNdNfZSQCuQCt__T13InversionListTSQDuQDt8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa11_57686974655f5370616365ZQBrFNaNdNfZSQCwQCv__T13InversionListTSQDwQDv8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_4c43ZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_4d63ZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_4d65ZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_4d6eZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_4e64ZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa2_5063ZQyFNaNdNfZSQCcQCb__T13InversionListTSQDcQDb8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T10opDispatchVAyaa5_4153434949ZQBeFNaNdNfZSQCjQCi__T13InversionListTSQDjQDi8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T16parseControlCodeTSQBn5regex8internal6parser__T6ParserTAyaTSQDcQBpQBmQBg7CodeGenZQBiZQDiFNaNfKQCyZw@Base 12
+ _D3std3uni7unicode__T16parseControlCodeTSQBnQBm__T16UnicodeSetParserTSQCq5regex8internal6parser__T6ParserTAyaTSQEfQBpQBmQBg7CodeGenZQBiZQDiZQEpFNaNfKQEfZw@Base 12
+ _D3std3uni7unicode__T17parsePropertySpecTSQBo5regex8internal6parser__T6ParserTAyaTSQDdQBpQBmQBg7CodeGenZQBiZQDjFNfKQCwbbZSQEqQEp__T13InversionListTSQFqQFp8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T17parsePropertySpecTSQBoQBn__T16UnicodeSetParserTSQCr5regex8internal6parser__T6ParserTAyaTSQEgQBpQBmQBg7CodeGenZQBiZQDiZQEqFNfKQEdbbZSQFxQFw__T13InversionListTSQGxQGw8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T6opCallTaZQkFNaNfMxAaZSQBqQBp__T13InversionListTSQCqQCp8GcPolicyZQBh@Base 12
+ _D3std3uni7unicode__T7loadAnyTSQBdQBc__T13InversionListTSQCdQCc8GcPolicyZQBhTaZQCgFNaNfMxAaZQCk@Base 12
+ _D3std3uni7unicode__T8parseSetTSQBe5regex8internal6parser__T6ParserTAyaTSQCtQBpQBmQBg7CodeGenZQBiZQCzFNfKQCwbZSQEfQEe__T13InversionListTSQFfQFe8GcPolicyZQBh@Base 12
+ _D3std3uni7write24FNaNbNiMPhkmZv@Base 12
+ _D3std3uni8GcPolicy6__initZ@Base 12
+ _D3std3uni8GcPolicy__T5allocTkZQjFNaNbNfmZAk@Base 12
+ _D3std3uni8GcPolicy__T6appendTkTiZQmFNaNbNfKAkiZv@Base 12
+ _D3std3uni8GcPolicy__T7destroyTAkZQmFNaNbNiNfKQpZv@Base 12
+ _D3std3uni8GcPolicy__T7reallocTkZQlFNaNbNfAkmZQe@Base 12
+ _D3std3uni8Grapheme10__postblitMFNaNbNiNeZv@Base 12
+ _D3std3uni8Grapheme11smallLengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni8Grapheme12convertToBigMFNaNbNiNeZv@Base 12
+ _D3std3uni8Grapheme13opIndexAssignMFNaNbNiNewmZv@Base 12
+ _D3std3uni8Grapheme5isBigMxFNaNbNdNiNfZh@Base 12
+ _D3std3uni8Grapheme6__dtorMFNaNbNiNeZv@Base 12
+ _D3std3uni8Grapheme6__initZ@Base 12
+ _D3std3uni8Grapheme6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni8Grapheme6setBigMFNaNbNiNfZv@Base 12
+ _D3std3uni8Grapheme7opIndexMxFNaNbNiNemZw@Base 12
+ _D3std3uni8Grapheme7opSliceMFNaNbNiNjNfZSQBnQBm__T16SliceOverIndexedTSQCqQCpQCoZQBe@Base 12
+ _D3std3uni8Grapheme7opSliceMFNaNbNiNjNfmmZSQBpQBo__T16SliceOverIndexedTSQCsQCrQCqZQBe@Base 12
+ _D3std3uni8Grapheme8opAssignMFNaNbNcNiNjNeSQBpQBoQBnZQl@Base 12
+ _D3std3uni8Grapheme__T10opOpAssignVAyaa1_7eTAxiZQBaMFNaNbNcNiNfMQuZSQCoQCnQCm@Base 12
+ _D3std3uni8Grapheme__T10opOpAssignVAyaa1_7eTAxwZQBaMFNaNbNcNiNfMQuZSQCoQCnQCm@Base 12
+ _D3std3uni8Grapheme__T10opOpAssignVAyaa1_7eZQwMFNaNbNcNiNewZSQChQCgQCf@Base 12
+ _D3std3uni8Grapheme__T6__ctorTiZQkMFNaNbNcNiNfMxAiXSQByQBxQBw@Base 12
+ _D3std3uni8Grapheme__T6__ctorTwZQkMFNaNbNcNiNfMxAwXSQByQBxQBw@Base 12
+ _D3std3uni8encodeToFNaNbNiNeMAamwZm@Base 12
+ _D3std3uni8encodeToFNaNbNiNeMAwmwZm@Base 12
+ _D3std3uni8encodeToFNaNeMAumwZm@Base 12
+ _D3std3uni8isFormatFNaNbNiNfwZb@Base 12
+ _D3std3uni8isNumberFNaNbNiNfwZb@Base 12
+ _D3std3uni8isSymbolFNaNbNiNfwZb@Base 12
+ _D3std3uni8markTrieFNaNbNdNiNfZ3resySQBjQBi__T4TrieTSQBzQBy__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDpQDo__T9sliceBitsVmi13Vmi21ZQvTSQExQEw__TQBiVmi8Vmi13ZQBvTSQFyQFx__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni8markTrieFNaNbNdNiNfZySQBfQBe__T4TrieTSQBvQBu__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDlQDk__T9sliceBitsVmi13Vmi21ZQvTSQEtQEs__TQBiVmi8Vmi13ZQBvTSQFuQFt__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni8midlow_8FNaNbNiNfkZk@Base 12
+ _D3std3uni9alphaTrieFNaNbNdNiNfZ3resySQBkQBj__T4TrieTSQCaQBz__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDqQDp__T9sliceBitsVmi13Vmi21ZQvTSQEyQEx__TQBiVmi8Vmi13ZQBvTSQFzQFy__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9alphaTrieFNaNbNdNiNfZySQBgQBf__T4TrieTSQBwQBv__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDmQDl__T9sliceBitsVmi13Vmi21ZQvTSQEuQEt__TQBiVmi8Vmi13ZQBvTSQFvQFu__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9isControlFNaNbNiNfwZb@Base 12
+ _D3std3uni9nfcQCTrieFNaNbNdNiNfZ3resySQBkQBj__T4TrieTSQCaQBz__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDqQDp__T9sliceBitsVmi13Vmi21ZQvTSQEyQEx__TQBiVmi8Vmi13ZQBvTSQFzQFy__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9nfcQCTrieFNaNbNdNiNfZySQBgQBf__T4TrieTSQBwQBv__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDmQDl__T9sliceBitsVmi13Vmi21ZQvTSQEuQEt__TQBiVmi8Vmi13ZQBvTSQFvQFu__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9nfdQCTrieFNaNbNdNiNfZ3resySQBkQBj__T4TrieTSQCaQBz__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDqQDp__T9sliceBitsVmi13Vmi21ZQvTSQEyQEx__TQBiVmi8Vmi13ZQBvTSQFzQFy__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9nfdQCTrieFNaNbNdNiNfZySQBgQBf__T4TrieTSQBwQBv__T9BitPackedTbVmi1ZQrTwVmi1114112TSQDmQDl__T9sliceBitsVmi13Vmi21ZQvTSQEuQEt__TQBiVmi8Vmi13ZQBvTSQFvQFu__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni9recomposeFNaNbNfmMAwMAhZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs11__xopEqualsMxFKxSQCzQCy__TQCxTQCoTtZQDhZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs6__ctorMFNaNbNcNfAmXSQDbQDa__TQCzTQCqTtZQDj@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQDpQDo__TQDnTQDeTtZQDx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs9__xtoHashFNbNeKxSQCyQCx__TQCwTQCnTtZQDgZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQDkQDj__T13PackedPtrImplTQDlVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQDkQDj__T13PackedPtrImplTtVmi16ZQx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQDmQDl__T19PackedArrayViewImplTQDtVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQDmQDl__T19PackedArrayViewImplTtVmi16ZQBd@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T6lengthVmi1ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo11__xopEqualsMxFKxSQDvQDu__TQDtTQDkTQClZQEfZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo6__ctorMFNaNbNcNfAmXSQDxQDw__TQDvTQDmTQCnZQEh@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQElQEk__TQEjTQEaTQDbZQEv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo9__xtoHashFNbNeKxSQDuQDt__TQDsTQDjTQCkZQEeZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQEgQEf__T13PackedPtrImplTQEhVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQEgQEf__T13PackedPtrImplTQDeVmi1ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQEiQEh__T19PackedArrayViewImplTQEpVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQEiQEh__T19PackedArrayViewImplTQDmVmi1ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T6lengthVmi1ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTQFhTQEjTQDoTQCtZQGkZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl6__ctorMFNaNbNcNfAmXSQFuQFt__TQFsTQFjTQElTQDqTQCvZQGm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQGiQGh__TQGgTQFxTQEzTQEeTQDjZQHa@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTQFgTQEiTQDnTQCsZQGjZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQGdQGc__T13PackedPtrImplTQGeVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQGdQGc__T13PackedPtrImplTQFcVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQGdQGc__T13PackedPtrImplTQEdVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T3ptrVmi3ZQjMNgFNaNbNdNiZNgSQGdQGc__T13PackedPtrImplTQDeVmi1ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQGfQGe__T19PackedArrayViewImplTQGmVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQGfQGe__T19PackedArrayViewImplTQFkVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQGfQGe__T19PackedArrayViewImplTQElVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T5sliceVmi3ZQlMNgFNaNbNdNiZNgSQGfQGe__T19PackedArrayViewImplTQDmVmi1ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi2ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi3ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T6lengthVmi3ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi7ZQrTSQCbQCa__TQBeTkVmi11ZQBpTSQDaQCz__TQCdTkVmi15ZQCoTSQDzQDy__TQDcTbVmi1ZQDmZQEl__T7raw_ptrVmi3ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm11__xopEqualsMxFKxSQEtQEs__TQErTQEiTQDkTQCpZQFhZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMFNaNbNcNfAmXSQEvQEu__TQEtTQEkTQDmTQCrZQFj@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQFjQFi__TQFhTQEyTQEaTQDfZQFx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm9__xtoHashFNbNeKxSQEsQEr__TQEqTQEhTQDjTQCoZQFgZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQFfVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQEdVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQDeVmi1ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQFnVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQElVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQDmVmi1ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi12ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm11__xopEqualsMxFKxSQEtQEs__TQErTQEiTQDkTQCpZQFhZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMFNaNbNcNfAmXSQEvQEu__TQEtTQEkTQDmTQCrZQFj@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQFjQFi__TQFhTQEyTQEaTQDfZQFx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm9__xtoHashFNbNeKxSQEsQEr__TQEqTQEhTQDjTQCoZQFgZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQFfVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQEdVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQDeVmi1ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQFnVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQElVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQDmVmi1ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi13ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm11__xopEqualsMxFKxSQEtQEs__TQErTQEiTQDkTQCpZQFhZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMFNaNbNcNfAmXSQEvQEu__TQEtTQEkTQDmTQCrZQFj@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQFjQFi__TQFhTQEyTQEaTQDfZQFx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm9__xtoHashFNbNeKxSQEsQEr__TQEqTQEhTQDjTQCoZQFgZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQFfVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQEdVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQFeQFd__T13PackedPtrImplTQDeVmi1ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQFnVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQElVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQFgQFf__T19PackedArrayViewImplTQDmVmi1ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi14ZQBpTSQDaQCz__TQCdTbVmi1ZQCnZQDm__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq11__xopEqualsMxFKxSQDxQDw__TQDvTQDmTQCoThZQEjZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq6__ctorMFNaNbNcNfAmXSQDzQDy__TQDxTQDoTQCqThZQEl@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQEnQEm__TQElTQEcTQDeThZQEz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq9__xtoHashFNbNeKxSQDwQDv__TQDuTQDlTQCnThZQEiZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplTQEjVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplTQDhVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplThVmi8ZQw@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQErVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQDpVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplThVmi8ZQBc@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq11__xopEqualsMxFKxSQDxQDw__TQDvTQDmTQCoTtZQEjZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq6__ctorMFNaNbNcNfAmXSQDzQDy__TQDxTQDoTQCqTtZQEl@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQEnQEm__TQElTQEcTQDeTtZQEz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq9__xtoHashFNbNeKxSQDwQDv__TQDuTQDlTQCnTtZQEiZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T3ptrVmi0ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplTQEjVmi8ZQy@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T3ptrVmi1ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplTQDhVmi16ZQz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T3ptrVmi2ZQjMNgFNaNbNdNiZNgSQEiQEh__T13PackedPtrImplTtVmi16ZQx@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQErVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQDpVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTtVmi16ZQBd@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq11__xopEqualsMxFKxSQDxQDw__TQDvTQDmTQCoTtZQEjZb@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq6__ctorMFNaNbNcNfAmXSQDzQDy__TQDxTQDoTQCqTtZQEl@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq6__ctorMxFNaNbNcNiNjNfAxmQdNkMQiZxSQEnQEm__TQElTQEcTQDeTtZQEz@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq6__initZ@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq9__xtoHashFNbNeKxSQDwQDv__TQDuTQDlTQCnTtZQEiZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T5sliceVmi0ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQErVmi8ZQBe@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T5sliceVmi1ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTQDpVmi16ZQBf@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T5sliceVmi2ZQlMNgFNaNbNdNiZNgSQEkQEj__T19PackedArrayViewImplTtVmi16ZQBd@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi0ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi0ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi1ZQmMFNaNbNdmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi1ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi2ZQmMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T6lengthVmi2ZQmMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T7raw_ptrVmi0ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T7raw_ptrVmi1ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq__T7raw_ptrVmi2ZQnMNgFNaNbNdNiZPNgm@Base 12
+ _D3std3uni__T10assumeSizeS_DQBaQz5low_8FNaNbNiNfkZkVmi8ZQBr6__initZ@Base 12
+ _D3std3uni__T10assumeSizeS_DQBaQz8midlow_8FNaNbNiNfkZkVmi8ZQBu6__initZ@Base 12
+ _D3std3uni__T10sharMethodSQyQw23switchUniformLowerBoundZ__TQBuVAyaa6_61203c3d2062TAxkTkZQCxFNaNbNiNfQskZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl10putRangeAtMFNaNbNemmbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTbTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieTSQGfQGe__T9BitPackedTbVmi1ZQrTwVmi1114112TQGhTQFdTQEgZQCi@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl5putAtMFNaNbNembZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl6__ctorMFNaNbNcNebZSQFtQFs__TQFrTbTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl8putRangeMFNaNewwbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl8putValueMFNaNewbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTbTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi14ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDoMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi14ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDsMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi14ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi7Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi7ZQCvZQEl__T8addValueVmi2TbZQqMFNaNbNebmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl10putRangeAtMFNaNbNemmbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTbTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieTSQGfQGe__T9BitPackedTbVmi1ZQrTwVmi1114112TQGhTQFdTQEgZQCi@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl5putAtMFNaNbNembZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl6__ctorMFNaNbNcNebZSQFtQFs__TQFrTbTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl8putRangeMFNaNewwbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl8putValueMFNaNewbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTbTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi13ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDoMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi13ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDsMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi13ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi8Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi8ZQCvZQEl__T8addValueVmi2TbZQqMFNaNbNebmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl10putRangeAtMFNaNbNemmbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTbTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieTSQGfQGe__T9BitPackedTbVmi1ZQrTwVmi1114112TQGhTQFdTQEgZQCi@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl5putAtMFNaNbNembZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl6__ctorMFNaNbNcNebZSQFtQFs__TQFrTbTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl8putRangeMFNaNewwbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl8putValueMFNaNewbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTbTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi12ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDoMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi12ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDsMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi12ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi9Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi9ZQCvZQEl__T8addValueVmi2TbZQqMFNaNbNebmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn10putRangeAtMFNaNbNemmbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn11__xopEqualsMxFKxSQGuQGt__TQGsTbTwVii1114112TQGiTQFeTQEgTQDjZQIaZb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn5buildMFNaNbNeZSQGrQGq__T4TrieTSQHhQHg__T9BitPackedTbVmi1ZQrTwVmi1114112TQHjTQGfTQFhTQEkZQCm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn5putAtMFNaNbNembZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn6__ctorMFNaNbNcNebZSQGvQGu__TQGtTbTwVii1114112TQGjTQFfTQEhTQDkZQIb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn8putRangeMFNaNewwbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn8putValueMFNaNewbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn9__xtoHashFNbNeKxSQGtQGs__TQGrTbTwVii1114112TQGhTQFdTQEfTQDiZQHzZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T14deduceMaxIndexTQFiTQEeTQDgTQCjZQBhFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T15spillToNextPageVmi0TSQHbQHa__T19PackedArrayViewImplTSQIhQIg__T9BitPackedTkVmi7ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T15spillToNextPageVmi1TSQHbQHa__T19PackedArrayViewImplTSQIhQIg__T9BitPackedTkVmi11ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T15spillToNextPageVmi2TSQHbQHa__T19PackedArrayViewImplTSQIhQIg__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T15spillToNextPageVmi3TSQHbQHa__T19PackedArrayViewImplTSQIhQIg__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDoMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T19spillToNextPageImplVmi1TSQHfQHe__T19PackedArrayViewImplTSQIlQIk__T9BitPackedTkVmi11ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T19spillToNextPageImplVmi2TSQHfQHe__T19PackedArrayViewImplTSQIlQIk__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T19spillToNextPageImplVmi3TSQHfQHe__T19PackedArrayViewImplTSQIlQIk__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDsMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T3idxVmi3ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T8addValueVmi0TSQGtQGs__T9BitPackedTkVmi7ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T8addValueVmi1TSQGtQGs__T9BitPackedTkVmi11ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T8addValueVmi2TSQGtQGs__T9BitPackedTkVmi15ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi14Vmi21ZQvTSQCwQCv__TQBiVmi10Vmi14ZQBwTSQDyQDx__TQCkVmi6Vmi10ZQCxTSQEzQEy__TQDlVmi0Vmi6ZQDxZQFn__T8addValueVmi3TbZQqMFNaNbNebmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj10putRangeAtMFNaNbNemmbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj11__xopEqualsMxFKxSQEqQEp__TQEoTbTwVii1114112TQEeTQDbZQFoZb@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj5buildMFNaNbNeZSQEnQEm__T4TrieTSQFdQFc__T9BitPackedTbVmi1ZQrTwVmi1114112TQFfTQEcZQCe@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj5putAtMFNaNbNembZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj6__ctorMFNaNbNcNebZSQErQEq__TQEpTbTwVii1114112TQEfTQDcZQFp@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj8putRangeMFNaNewwbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj8putValueMFNaNewbZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj9__xtoHashFNbNeKxSQEpQEo__TQEnTbTwVii1114112TQEdTQDaZQFnZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T14deduceMaxIndexTQDeTQCbZQzFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T15spillToNextPageVmi0TSQExQEw__T19PackedArrayViewImplTSQGdQGc__T9BitPackedTkVmi13ZQsVmi16ZQCgZQDqMFNaNbNiNeKQDiZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T15spillToNextPageVmi1TSQExQEw__T19PackedArrayViewImplTSQGdQGc__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDoMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T19spillToNextPageImplVmi1TSQFbQFa__T19PackedArrayViewImplTSQGhQGg__T9BitPackedTbVmi1ZQrVmi1ZQCeZQDsMFNaNbNeKQDeZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T8addValueVmi0TSQEpQEo__T9BitPackedTkVmi13ZQsZQBtMFNaNbNiNeQBsmZv@Base 12
+ _D3std3uni__T11TrieBuilderTbTwVii1114112TSQBoQBn__T9sliceBitsVmi8Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi8ZQBtZQDj__T8addValueVmi1TbZQqMFNaNbNebmZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl10putRangeAtMFNaNbNemmhZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqThTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieThTwVmi1114112TQFfTQEbTQDeZQBg@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl5putAtMFNaNbNemhZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__ctorMFNaNbNcNehZSQFtQFs__TQFrThTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl8putRangeMFNaNewwhZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl8putValueMFNaNewhZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpThTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplThVmi8ZQBcZQCmMFNaNbNeKQCcZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplThVmi8ZQBcZQCqMFNaNbNeKQCcZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi15ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderThTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi2ThZQqMFNaNbNehmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl10putRangeAtMFNaNbNemmtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTtTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieTtTwVmi1114112TQFfTQEbTQDeZQBg@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl5putAtMFNaNbNemtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl6__ctorMFNaNbNcNetZSQFtQFs__TQFrTtTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl8putRangeMFNaNewwtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl8putValueMFNaNewtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTtTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi16ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplTtVmi16ZQBdZQCnMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi16ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplTtVmi16ZQBdZQCrMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi16ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi5Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi5ZQCvZQEl__T8addValueVmi2TtZQqMFNaNbNetmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl10putRangeAtMFNaNbNemmtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl11__xopEqualsMxFKxSQFsQFr__TQFqTtTwVii1114112TQFgTQEcTQDfZQGuZb@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl5buildMFNaNbNeZSQFpQFo__T4TrieTtTwVmi1114112TQFfTQEbTQDeZQBg@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl5putAtMFNaNbNemtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__ctorMFNaNbNcNetZSQFtQFs__TQFrTtTwVii1114112TQFhTQEdTQDgZQGv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl8putRangeMFNaNewwtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl8putValueMFNaNewtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl9__xtoHashFNbNeKxSQFrQFq__TQFpTtTwVii1114112TQFfTQEbTQDeZQGtZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T14deduceMaxIndexTQEgTQDcTQCfZQBdFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi0TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi8ZQrVmi8ZQCeZQDoMFNaNbNiNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi1TSQFzQFy__T19PackedArrayViewImplTSQHfQHe__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDqMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T15spillToNextPageVmi2TSQFzQFy__T19PackedArrayViewImplTtVmi16ZQBdZQCnMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T19spillToNextPageImplVmi1TSQGdQGc__T19PackedArrayViewImplTSQHjQHi__T9BitPackedTkVmi15ZQsVmi16ZQCgZQDuMFNaNbNeKQDgZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T19spillToNextPageImplVmi2TSQGdQGc__T19PackedArrayViewImplTtVmi16ZQBdZQCrMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T3idxVmi2ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi0TSQFrQFq__T9BitPackedTkVmi8ZQrZQBsMFNaNbNiNeQBrmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi1TSQFrQFq__T9BitPackedTkVmi15ZQsZQBtMFNaNbNeQBqmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi13Vmi21ZQvTSQCwQCv__TQBiVmi6Vmi13ZQBvTSQDxQDw__TQCjVmi0Vmi6ZQCvZQEl__T8addValueVmi2TtZQqMFNaNbNetmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj10putRangeAtMFNaNbNemmtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj11__xopEqualsMxFKxSQEqQEp__TQEoTtTwVii1114112TQEeTQDbZQFoZb@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj14ConstructState6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj5buildMFNaNbNeZSQEnQEm__T4TrieTtTwVmi1114112TQEdTQDaZQBc@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj5putAtMFNaNbNemtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj6__ctorMFNaNbNcNetZSQErQEq__TQEpTtTwVii1114112TQEfTQDcZQFp@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj6__initZ@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj8putRangeMFNaNewwtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj8putValueMFNaNewtZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj9__xtoHashFNbNeKxSQEpQEo__TQEnTtTwVii1114112TQEdTQDaZQFnZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T14deduceMaxIndexTQDeTQCbZQzFNaNbNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T15spillToNextPageVmi0TSQExQEw__T19PackedArrayViewImplTSQGdQGc__T9BitPackedTkVmi12ZQsVmi16ZQCgZQDqMFNaNbNiNeKQDiZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T15spillToNextPageVmi1TSQExQEw__T19PackedArrayViewImplTtVmi16ZQBdZQCnMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T19spillToNextPageImplVmi1TSQFbQFa__T19PackedArrayViewImplTtVmi16ZQBdZQCrMFNaNbNeKQCdZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T3idxVmi0ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T3idxVmi1ZQjMFNaNbNcNdNiNeZm@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T8addValueVmi0TSQEpQEo__T9BitPackedTkVmi12ZQsZQBtMFNaNbNiNeQBsmZv@Base 12
+ _D3std3uni__T11TrieBuilderTtTwVii1114112TSQBoQBn__T9sliceBitsVmi9Vmi21ZQuTSQCvQCu__TQBhVmi0Vmi9ZQBtZQDj__T8addValueVmi1TtZQqMFNaNbNetmZv@Base 12
+ _D3std3uni__T11copyForwardTiTkZQsFNaNbNiNfAiAkZv@Base 12
+ _D3std3uni__T11copyForwardTkTkZQsFNaNbNiNfAkQcZv@Base 12
+ _D3std3uni__T11copyForwardTmTmZQsFNaNbNiNfAmQcZv@Base 12
+ _D3std3uni__T11findSetNameS_DQBb8internal14unicode_tables6blocks3tabFNaNdNfZAySQCzQByQBs15UnicodePropertyTaZQDrFNaNfMxAaZb@Base 12
+ _D3std3uni__T11findSetNameS_DQBb8internal14unicode_tables7scripts3tabFNaNbNdNiNfZAySQDeQCdQBx15UnicodePropertyTaZQDwFNaNfMxAaZb@Base 12
+ _D3std3uni__T11findSetNameS_DQBb8internal14unicode_tables8uniProps3tabFNaNdNfZAySQDbQCaQBu15UnicodePropertyTaZQDtFNaNfMxAaZb@Base 12
+ _D3std3uni__T11memoizeExprVAyaa91_756e69636f64652e416c7068616265746963207c20756e69636f64652e4d6e207c20756e69636f64652e4d630a20202020202020207c20756e69636f64652e4d65207c20756e69636f64652e4e64207c20756e69636f64652e5063ZQHwFNfZSQIpQIo__T13InversionListTSQJpQJo8GcPolicyZQBh@Base 12
+ _D3std3uni__T11memoizeExprVAyaa91_756e69636f64652e416c7068616265746963207c20756e69636f64652e4d6e207c20756e69636f64652e4d630a20202020202020207c20756e69636f64652e4d65207c20756e69636f64652e4e64207c20756e69636f64652e5063ZQHwFZ11initializedb@Base 12
+ _D3std3uni__T11memoizeExprVAyaa91_756e69636f64652e416c7068616265746963207c20756e69636f64652e4d6e207c20756e69636f64652e4d630a20202020202020207c20756e69636f64652e4d65207c20756e69636f64652e4e64207c20756e69636f64652e5063ZQHwFZ4slotSQIsQIr__T13InversionListTSQJsQJr8GcPolicyZQBh@Base 12
+ _D3std3uni__T11parseUniHexTAyaZQsFNaNfKQmmZw@Base 12
+ _D3std3uni__T11parseUniHexTSQBaQz__T16UnicodeSetParserTSQCc5regex8internal6parser__T6ParserTAyaTSQDrQBpQBmQBg7CodeGenZQBiZQDiZQEjFNaNfKQEemZw@Base 12
+ _D3std3uni__T12fullCasedCmpTAxwZQtFNaNbNiNfwwKQsZi@Base 12
+ _D3std3uni__T12fullCasedCmpTSQBb3utf__T5byUTFTwVEQBv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEyQDx__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFkFNcQCeZ6ResultZQHcFNaNbNiNfwwKQHcZi@Base 12
+ _D3std3uni__T12fullCasedCmpTSQBb3utf__T5byUTFTwVEQBv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEyQDx__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFkFNcQCeZ6ResultZQHcFNaNbNiNfwwKQHcZi@Base 12
+ _D3std3uni__T12loadPropertyTSQBbQBa__T13InversionListTSQCbQCa8GcPolicyZQBhTaZQCmFNaNfMxAaKQCkZb@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi5Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi5ZQCvZ__TQEbTiZQEhFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi5Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi5ZQCvZ__TQEbTwZQEhFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi6Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi6ZQCvZ__TQEbTiZQEhFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi6Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi6ZQCvZ__TQEbTwZQEhFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi7Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi7ZQCvZ__TQEbTiZQEhFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi7Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi7ZQCvZ__TQEbTwZQEhFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi8Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi8ZQCvZ__TQEbTiZQEhFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi8Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi8ZQCvZ__TQEbTwZQEhFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi9Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi9ZQCvZ__TQEbTiZQEhFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi13Vmi21ZQvTSQCjQCi__TQBiVmi9Vmi13ZQBvTSQDkQDj__TQCjVmi0Vmi9ZQCvZ__TQEbTwZQEhFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi14Vmi21ZQvTSQCjQCi__TQBiVmi10Vmi14ZQBwTSQDlQDk__TQCkVmi6Vmi10ZQCxTSQEmQEl__TQDlVmi0Vmi6ZQDxZ__TQFdTiZQFjFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi14Vmi21ZQvTSQCjQCi__TQBiVmi10Vmi14ZQBwTSQDlQDk__TQCkVmi6Vmi10ZQCxTSQEmQEl__TQDlVmi0Vmi6ZQDxZ__TQFdTwZQFjFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi8Vmi21ZQuTSQCiQCh__TQBhVmi0Vmi8ZQBtZ__TQCzTiZQDfFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi8Vmi21ZQuTSQCiQCh__TQBhVmi0Vmi8ZQBtZ__TQCzTwZQDfFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi9Vmi21ZQuTSQCiQCh__TQBhVmi0Vmi9ZQBtZ__TQCzTiZQDfFNaNbNiNfiZm@Base 12
+ _D3std3uni__T12mapTrieIndexTSQBbQBa__T9sliceBitsVmi9Vmi21ZQuTSQCiQCh__TQBhVmi0Vmi9ZQBtZ__TQCzTwZQDfFNaNbNiNfwZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toLowerIndexFNaNbNiNewZtVki1043S_DQCsQCr10toLowerTabFNaNbNiNemZwZ__TQDpTaZQDvFNaNfMxAaZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toLowerIndexFNaNbNiNewZtVki1043S_DQCsQCr10toLowerTabFNaNbNiNemZwZ__TQDpTuZQDvFNaNfMxAuZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toLowerIndexFNaNbNiNewZtVki1043S_DQCsQCr10toLowerTabFNaNbNiNemZwZ__TQDpTwZQDvFNaNfMxAwZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toUpperIndexFNaNbNiNewZtVki1051S_DQCsQCr10toUpperTabFNaNbNiNemZwZ__TQDpTaZQDvFNaNfMxAaZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toUpperIndexFNaNbNiNewZtVki1051S_DQCsQCr10toUpperTabFNaNbNiNemZwZ__TQDpTuZQDvFNaNfMxAuZm@Base 12
+ _D3std3uni__T12toCaseLengthS_DQBcQBb12toUpperIndexFNaNbNiNewZtVki1051S_DQCsQCr10toUpperTabFNaNbNiNemZwZ__TQDpTwZQDvFNaNfMxAwZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh10byIntervalMFNaNbNdNlNfZSQCvQCu__TQCtTQChZQDb__T9IntervalsTAkZQo@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11__xopEqualsMxFKxSQCoQCn__TQCmTQCaZQCuZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11addIntervalMFNaNbNlNfiimZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNaNbNdNfZSQCuQCt__TQCsTQCgZQDaQBtMFNdNfZ14CodepointRange@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange11__xopEqualsMxFKxSQDyQDx__TQDwTQDkZQEeQCxMFNdNfZQCnZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange5frontMxFNaNbNdNiNfZw@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange6__ctorMFNaNbNcNfSQDxQDw__TQDvTQDjZQEdZSQEtQEs__TQErTQEfZQEzQDsMFNdNfZQDi@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange6__initZ@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange9__xtoHashFNbNeKxSQDxQDw__TQDvTQDjZQEdQCwMFNdNfZQCmZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh12toSourceCodeFNfAxSQCpQCo17CodepointIntervalAyaZQe@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh12toSourceCodeFNfAxSQCpQCo17CodepointIntervalAyaZ__T11binaryScopeTQBxZQsFNfQCgQBhZQBl@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh12toSourceCodeFNfAxSQCpQCo17CodepointIntervalAyaZ__T11linearScopeTQBxZQsFNaNfQCiQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh12toSourceCodeFNfAxSQCpQCo17CodepointIntervalAyaZ__T6bisectTQBrZQmFNfQCamQBcZQBg@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh12toSourceCodeMFNfAyaZQe@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6lengthMFNaNbNdNfZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh7opIndexMxFNaNbNiNfkZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh7subCharMFNaNbNcNfwZSQCqQCp__TQCoTQCcZQCw@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh8dropUpToMFNaNbNfkmZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh8invertedMFNaNbNdNfZSQCqQCp__TQCoTQCcZQCw@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh8opAssignMFNaNbNcNiNjNeSQCtQCs__TQCrTQCfZQCzZQw@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh8sanitizeMFNaNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh8skipUpToMFNaNbNfkmZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh9__xtoHashFNbNeKxSQCnQCm__TQClTQBzZQCtZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh9intervalsMxFNaNbNdNfZAxSQCuQCt17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_26TSQCvQCu__TQCtTQChZQDbZQBsMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_2dTSQCvQCu__TQCtTQChZQDbZQBsMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_7cTSQCvQCu__TQCtTQChZQDbZQBsMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_7cTkZQyMFNaNbNcNfkZSQDlQDk__TQDjTQCxZQDr@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_7cTwZQyMFNaNbNcNfwZSQDlQDk__TQDjTQCxZQDr@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T10opOpAssignVAyaa1_7eTSQCvQCu__TQCtTQChZQDbZQBsMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T13fromIntervalsTSQCpQCo21DecompressedIntervalsZQBvFNaNfQBnZSQEgQEf__TQEeTQDsZQEm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T13fromIntervalsZQqFNaNbNfAkXSQDbQDa__TQCzTQCnZQDh@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T3addTSQCeQCd__TQCcTQBqZQCkZQBbMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T3addZQfMFNaNbNcNfkkZSQCtQCs__TQCrTQCfZQCz@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T3subTSQCeQCd__TQCcTQBqZQCkZQBbMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T6__ctorTSQChQCg__TQCfTQBtZQCnZQBeMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T7scanForZQjMxFNaNbNiNfwZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T8opBinaryVAyaa1_26TSQCsQCr__TQCqTQCeZQCyZQBpMFNaNbNfQBhZQBl@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T8opBinaryVAyaa1_7cTSQCsQCr__TQCqTQCeZQCyZQBpMFNaNbNfQBhZQBl@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo11__xopEqualsMxFKxSQDhQDg__TQDfTQCtZQDn__TQCgTQBzZQCoZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo13opIndexAssignMFNaNbNiNfSQDoQDn17CodepointIntervalmZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo4backMFNaNbNdNiNfSQDgQDf17CodepointIntervalZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo4backMxFNaNbNdNiNfZSQDiQDh17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo4saveMFNaNbNdNiNfZSQDhQDg__TQDfTQCtZQDn__TQCgTQBzZQCo@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo5frontMFNaNbNdNiNfSQDhQDg17CodepointIntervalZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo5frontMxFNaNbNdNiNfZSQDjQDi17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6__ctorMFNaNbNcNiNlNfQBaZSQDoQDn__TQDmTQDaZQDu__TQCnTQCgZQCv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6__ctorMFNaNbNcNiNlNfQBammZSQDqQDp__TQDoTQDcZQDw__TQCpTQCiZQCx@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6__initZ@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo7opIndexMxFNaNbNiNfmZSQDkQDj17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo7opSliceMFNaNbNiNfmmZSQDkQDj__TQDiTQCwZQDq__TQCjTQCcZQCr@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo7popBackMFNaNbNiNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo9__xtoHashFNbNeKxSQDgQDf__TQDeTQCsZQDm__TQCfTQByZQCnZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp11__xopEqualsMxFKxSQDiQDh__TQDgTQCuZQDo__TQChTQCaZQCpZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp4backMxFNaNbNdNiNfZSQDjQDi17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp4saveMFNaNbNdNiNfZSQDiQDh__TQDgTQCuZQDo__TQChTQCaZQCp@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp5frontMxFNaNbNdNiNfZSQDkQDj17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp6__ctorMFNaNbNcNiNlNfQBbZSQDpQDo__TQDnTQDbZQDv__TQCoTQChZQCw@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp6__ctorMFNaNbNcNiNlNfQBbmmZSQDrQDq__TQDpTQDdZQDx__TQCqTQCjZQCy@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp6__initZ@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp7opIndexMxFNaNbNiNfmZSQDlQDk17CodepointInterval@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp7opSliceMFNaNbNiNfmmZSQDlQDk__TQDjTQCxZQDr__TQCkTQCdZQCs@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp7popBackMFNaNbNiNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp9__xtoHashFNbNeKxSQDhQDg__TQDfTQCtZQDn__TQCgTQBzZQCoZm@Base 12
+ _D3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9intersectTSQCkQCj__TQCiTQBwZQCqZQBhMFNaNbNcNfQBjZQBn@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy11simpleIndexMNgFNaNbNimZQCk@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy11simpleWriteMFNaNbNibmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy13opIndexAssignMFNaNbNibmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy6__ctorMNgFNaNbNcNiNfPNgmZNgSQDpQDo__TQDnTQDbVmi1ZQDz@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa11simpleIndexMNgFNaNbNimZQCm@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa6__ctorMNgFNaNbNcNiNfPNgmZNgSQDrQDq__TQDpTQDdVmi16ZQEc@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy11simpleIndexMNgFNaNbNimZQCk@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy6__ctorMNgFNaNbNcNiNfPNgmZNgSQDpQDo__TQDnTQDbVmi8ZQDz@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy11simpleIndexMNgFNaNbNimZQCk@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy11simpleWriteMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy6__ctorMNgFNaNbNcNiNfPNgmZNgSQDpQDo__TQDnTQDbVmi8ZQDz@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw11simpleIndexMNgFNaNbNimZh@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw11simpleWriteMFNaNbNihmZv@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw13opIndexAssignMFNaNbNihmZv@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw6__ctorMNgFNaNbNcNiNfPNgmZNgSQCmQCl__TQCkThVmi8ZQCu@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplThVmi8ZQw7opIndexMNgFNaNbNimZh@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx11simpleIndexMNgFNaNbNimZt@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx11simpleWriteMFNaNbNitmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx13opIndexAssignMFNaNbNitmZv@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx6__ctorMNgFNaNbNcNiNfPNgmZNgSQCnQCm__TQClTtVmi16ZQCw@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx6__initZ@Base 12
+ _D3std3uni__T13PackedPtrImplTtVmi16ZQx7opIndexMNgFNaNbNimZt@Base 12
+ _D3std3uni__T13copyBackwardsTkTkZQuFNaNbNiNfAkQcZv@Base 12
+ _D3std3uni__T13copyBackwardsTmTmZQuFNaNbNiNfAmQcZv@Base 12
+ _D3std3uni__T13replicateBitsVmi1Vmi64ZQzFNaNbNiNfmZm@Base 12
+ _D3std3uni__T13replicateBitsVmi2Vmi32ZQzFNaNbNiNfmZm@Base 12
+ _D3std3uni__T13replicateBitsVmi4Vmi16ZQzFNaNbNiNfmZm@Base 12
+ _D3std3uni__T13replicateBitsVmi64Vmi1ZQzFNaNbNiNfmZm@Base 12
+ _D3std3uni__T13replicateBitsVmi8Vmi8ZQyFNaNbNiNfmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTaZQDpFNaNeKAaZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTaZQDpFNaNeKAaZv@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTuZQDpFNaNeKAuZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTuZQDpFNaNeKAuZv@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTwZQDpFNaNeKAwZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toLowerIndexFNaNbNiNewZtVii1043S_DQCtQCs10toLowerTabFNaNbNiNemZwTwZQDpFNaNeKAwZv@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTaZQDpFNaNeKAaZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTaZQDpFNaNeKAaZv@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTuZQDpFNaNeKAuZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTuZQDpFNaNeKAuZv@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTwZQDpFNaNeKAwZ6moveToFNaNbNiNfQtmmmZm@Base 12
+ _D3std3uni__T13toCaseInPlaceS_DQBdQBc12toUpperIndexFNaNbNiNewZtVii1051S_DQCtQCs10toUpperTabFNaNbNiNemZwTwZQDpFNaNeKAwZv@Base 12
+ _D3std3uni__T14findUnicodeSetS_DQBe8internal14unicode_tables6blocks3tabFNaNdNfZAySQDcQByQBs15UnicodePropertyTaZQDuFNaNfMxAaZl@Base 12
+ _D3std3uni__T14findUnicodeSetS_DQBe8internal14unicode_tables7scripts3tabFNaNbNdNiNfZAySQDhQCdQBx15UnicodePropertyTaZQDzFNaNfMxAaZl@Base 12
+ _D3std3uni__T14findUnicodeSetS_DQBe8internal14unicode_tables8uniProps3tabFNaNdNfZAySQDeQCaQBu15UnicodePropertyTaZQDwFNaNfMxAaZl@Base 12
+ _D3std3uni__T14genericReplaceTvTSQBfQBe__T8CowArrayTSQBzQBy8GcPolicyZQBbTAiZQClFNaNbNeKQCdmmQtZm@Base 12
+ _D3std3uni__T14genericReplaceTvTSQBfQBe__T8CowArrayTSQBzQBy8GcPolicyZQBbTAkZQClFNaNbNeKQCdmmQtZm@Base 12
+ _D3std3uni__T14graphemeStrideTaZQtFNaNfMxAamZm@Base 12
+ _D3std3uni__T14graphemeStrideTwZQtFNaNbNiNfMxAwmZm@Base 12
+ _D3std3uni__T14loadUnicodeSetS_DQBe8internal14unicode_tables6blocks3tabFNaNdNfZAySQDcQByQBs15UnicodePropertyTSQEeQEd__T13InversionListTSQFeQFd8GcPolicyZQBhTaZQFpFNaNfMxAaKQCkZb@Base 12
+ _D3std3uni__T14loadUnicodeSetS_DQBe8internal14unicode_tables7scripts3tabFNaNbNdNiNfZAySQDhQCdQBx15UnicodePropertyTSQEjQEi__T13InversionListTSQFjQFi8GcPolicyZQBhTaZQFuFNaNfMxAaKQCkZb@Base 12
+ _D3std3uni__T14loadUnicodeSetS_DQBe8internal14unicode_tables8uniProps3tabFNaNdNfZAySQDeQCaQBu15UnicodePropertyTSQEgQEf__T13InversionListTSQFgQFf8GcPolicyZQBhTaZQFrFNaNfMxAaKQCkZb@Base 12
+ _D3std3uni__T14toLowerInPlaceTaZQtFNaNeKAaZv@Base 12
+ _D3std3uni__T14toLowerInPlaceTuZQtFNaNeKAuZv@Base 12
+ _D3std3uni__T14toLowerInPlaceTwZQtFNaNeKAwZv@Base 12
+ _D3std3uni__T14toUpperInPlaceTaZQtFNaNeKAaZv@Base 12
+ _D3std3uni__T14toUpperInPlaceTuZQtFNaNeKAuZv@Base 12
+ _D3std3uni__T14toUpperInPlaceTwZQtFNaNeKAwZv@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTbVmi1ZQrZQBwFNaNbNiNfPNgmmZNgSQDcQDb__T19PackedArrayViewImplTQDeVmi1ZQBe@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi11ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi12ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi13ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi14ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi15ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi16ZQsZQBxFNaNbNiNfPNgmmZNgSQDdQDc__T19PackedArrayViewImplTQDfVmi16ZQBf@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi7ZQrZQBwFNaNbNiNfPNgmmZNgSQDcQDb__T19PackedArrayViewImplTQDeVmi8ZQBe@Base 12
+ _D3std3uni__T15packedArrayViewTSQBeQBd__T9BitPackedTkVmi8ZQrZQBwFNaNbNiNfPNgmmZNgSQDcQDb__T19PackedArrayViewImplTQDeVmi8ZQBe@Base 12
+ _D3std3uni__T15packedArrayViewThZQuFNaNbNiNfPNgmmZNgSQBzQBy__T19PackedArrayViewImplThVmi8ZQBc@Base 12
+ _D3std3uni__T15packedArrayViewTtZQuFNaNbNiNfPNgmmZNgSQBzQBy__T19PackedArrayViewImplTtVmi16ZQBd@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk11__xopEqualsMxFKxSQCrQCq__TQCpTQCaZQCxZb@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk13opIndexAssignMFNaNbNiNfwmZv@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk4backMFNaNbNdNiNfwZv@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk4backMxFNaNbNdNiNfZw@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk4saveMNgFNaNbNdNiNfZNgSQCvQCu__TQCtTQCeZQDb@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk5frontMFNaNbNdNiNfwZv@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk5frontMxFNaNbNdNiNfZw@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk6__initZ@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk7opIndexMxFNaNbNiNfmZw@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk7opSliceMFNaNbNiNfZSQCsQCr__TQCqTQCbZQCy@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk7opSliceMFNaNbNiNfmmZSQCuQCt__TQCsTQCdZQDa@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk7popBackMFNaNbNiNfZv@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk__T8opEqualsTxSQCnQCm__TQClTQBwZQCtZQBhMxFNaNbNiNfKxQBmZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi11__xopEqualsMxFKxSQEpQEo__TQEnTQDyZQEvZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi13parseCharTermMFNfZSQEr8typecons__T5TupleTSQFoQFn__T13InversionListTSQGoQGn8GcPolicyZQBhTEQHjQHi__TQHhTQGsZQHp8OperatorZQDh@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi13parseCharTermMFZ18twinSymbolOperatorFNaNbNiNfwZEQFuQFt__TQFsTQFdZQGa8Operator@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi5frontMFNaNbNdNiNfZw@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi6__initZ@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8parseSetMFNfZSQElQEk__T13InversionListTSQFlQFk8GcPolicyZQBh@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8parseSetMFZ5applyFNfEQEsQEr__TQEqTQEbZQEy8OperatorKSQFxQFw__T5StackTSQGoQGn__T13InversionListTSQHoQHn8GcPolicyZQBhZQCcZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8parseSetMFZ__T11unrollWhileSQEz10functional__T8unaryFunVQDma11_6120213d20612e4f70656eVQEqa1_61ZQBxZQDhFNfKSQIaQHz__T5StackTSQIrQIq__T13InversionListTSQJrQJq8GcPolicyZQBhZQCcKSQKqQKp__TQCqTEQLeQLd__TQLcTQKnZQLk8OperatorZQDzZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8parseSetMFZ__T11unrollWhileSQEz10functional__T8unaryFunVQDma12_61203d3d20612e556e696f6eVQEsa1_61ZQBzZQDjFNfKSQIcQIb__T5StackTSQItQIs__T13InversionListTSQJtQJs8GcPolicyZQBhZQCcKSQKsQKr__TQCqTEQLgQLf__TQLeTQKpZQLm8OperatorZQDzZb@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8popFrontMFNaNfZv@Base 12
+ _D3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi9__xtoHashFNbNeKxSQEoQEn__TQEmTQDxZQEuZm@Base 12
+ _D3std3uni__T16codepointSetTrieVii13Vii8Z__TQBfTSQBvQBu__T13InversionListTSQCvQCu8GcPolicyZQBhZQDeFNaNfQCdZSQEcQEb__T4TrieTSQEsQEr__T9BitPackedTbVmi1ZQrTwVmi1114112TSQGiQGh__T9sliceBitsVmi8Vmi21ZQuTSQHpQHo__TQBhVmi0Vmi8ZQBtZQEd@Base 12
+ _D3std3uni__T16propertyNameLessTaTaZQxFNaNfAxaQdZb@Base 12
+ _D3std3uni__T16sliceOverIndexedTSQBfQBe8GraphemeZQBkFNaNbNiNfmmPQBgZSQCpQCo__T16SliceOverIndexedTQCnZQx@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toLowerIndexFNaNbNiNewZtVki1043S_DQCyQCx10toLowerTabFNaNbNiNemZwZ__TQDvTaZQEbFNaNeKAammZv@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toLowerIndexFNaNbNiNewZtVki1043S_DQCyQCx10toLowerTabFNaNbNiNemZwZ__TQDvTuZQEbFNaNeKAummZv@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toLowerIndexFNaNbNiNewZtVki1043S_DQCyQCx10toLowerTabFNaNbNiNemZwZ__TQDvTwZQEbFNaNeKAwmmZv@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toUpperIndexFNaNbNiNewZtVki1051S_DQCyQCx10toUpperTabFNaNbNiNemZwZ__TQDvTaZQEbFNaNeKAammZv@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toUpperIndexFNaNbNiNewZtVki1051S_DQCyQCx10toUpperTabFNaNbNiNemZwZ__TQDvTuZQEbFNaNeKAummZv@Base 12
+ _D3std3uni__T18toCaseInPlaceAllocS_DQBiQBh12toUpperIndexFNaNbNiNewZtVki1051S_DQCyQCx10toUpperTabFNaNbNiNemZwZ__TQDvTwZQEbFNaNeKAwmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe11__xopEqualsMxFKxSQDlQDk__TQDjTQCrVmi1ZQDvZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe13opIndexAssignMFNaNbNibmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe13opSliceAssignMFNaNbNiQCimmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe13opSliceAssignMFNaNbNibmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDxQDw__TQDvTQDdVmi1ZQEh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe7opSliceMFNaNbNiNfZSQDmQDl__TQDkTQCsVmi1ZQDw@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe7opSliceMNgFNaNbNiNfmmZNgSQDsQDr__TQDqTQCyVmi1ZQEc@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe__T8opEqualsTxSQDhQDg__TQDfTQCnVmi1ZQDrZQBlMxFNaNbNiKxQBoZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg11__xopEqualsMxFKxSQDnQDm__TQDlTQCtVmi16ZQDyZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg13opIndexAssignMFNaNbNiQCkmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg13opSliceAssignMFNaNbNiQCkmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDzQDy__TQDxTQDfVmi16ZQEk@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg7opIndexMNgFNaNbNimZQCh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg7opSliceMFNaNbNiNfZSQDoQDn__TQDmTQCuVmi16ZQDz@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg7opSliceMNgFNaNbNiNfmmZNgSQDuQDt__TQDsTQDaVmi16ZQEf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg__T8opEqualsTxSQDjQDi__TQDhTQCpVmi16ZQDuZQBmMxFNaNbNiKxQBpZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe11__xopEqualsMxFKxSQDlQDk__TQDjTQCrVmi8ZQDvZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe13opSliceAssignMFNaNbNiQCimmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDxQDw__TQDvTQDdVmi8ZQEh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe7opSliceMFNaNbNiNfZSQDmQDl__TQDkTQCsVmi8ZQDw@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe7opSliceMNgFNaNbNiNfmmZNgSQDsQDr__TQDqTQCyVmi8ZQEc@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe__T8opEqualsTxSQDhQDg__TQDfTQCnVmi8ZQDrZQBlMxFNaNbNiKxQBoZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe11__xopEqualsMxFKxSQDlQDk__TQDjTQCrVmi8ZQDvZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe13opIndexAssignMFNaNbNiQCimZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe13opIndexAssignMFNaNbNikmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe13opSliceAssignMFNaNbNiQCimmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe13opSliceAssignMFNaNbNikmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQDxQDw__TQDvTQDdVmi8ZQEh@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe7opIndexMNgFNaNbNimZQCf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe7opSliceMFNaNbNiNfZSQDmQDl__TQDkTQCsVmi8ZQDw@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe7opSliceMNgFNaNbNiNfmmZNgSQDsQDr__TQDqTQCyVmi8ZQEc@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe__T8opEqualsTxSQDhQDg__TQDfTQCnVmi8ZQDrZQBlMxFNaNbNiKxQBoZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc11__xopEqualsMxFKxSQCjQCi__TQChThVmi8ZQCrZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc13opIndexAssignMFNaNbNihmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc13opSliceAssignMFNaNbNihmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQCvQCu__TQCtThVmi8ZQDd@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc7opIndexMNgFNaNbNimZh@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc7opSliceMFNaNbNiNfZSQCkQCj__TQCiThVmi8ZQCs@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc7opSliceMNgFNaNbNiNfmmZNgSQCqQCp__TQCoThVmi8ZQCy@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc__T8opEqualsTxSQCfQCe__TQCdThVmi8ZQCnZQBjMxFNaNbNiKxQBmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplThVmi8ZQBc__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd11__xopEqualsMxFKxSQCkQCj__TQCiTtVmi16ZQCtZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd13opIndexAssignMFNaNbNitmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd13opSliceAssignMFNaNbNitmmZv@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd5zerosMFNaNbNimmZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd6__ctorMNgFNaNbNcNiNfPNgmmmZNgSQCwQCv__TQCuTtVmi16ZQDf@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd6__initZ@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd7opIndexMNgFNaNbNimZt@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd7opSliceMFNaNbNiNfZSQClQCk__TQCjTtVmi16ZQCu@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd7opSliceMNgFNaNbNiNfmmZNgSQCrQCq__TQCpTtVmi16ZQDa@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd__T7roundUpZQjMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd__T8opEqualsTxSQCgQCf__TQCeTtVmi16ZQCpZQBkMxFNaNbNiKxQBnZb@Base 12
+ _D3std3uni__T19PackedArrayViewImplTtVmi16ZQBd__T9roundDownZQlMFNaNbNiNfmZm@Base 12
+ _D3std3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZb@Base 12
+ _D3std3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZi@Base 12
+ _D3std3uni__T20isPrettyPropertyNameTaZQzFNaNfMxAaZb@Base 12
+ _D3std3uni__T21genericDecodeGraphemeVbi0Z__TQBfTAxaZQBnFNaNfKQnZv@Base 12
+ _D3std3uni__T21genericDecodeGraphemeVbi0Z__TQBfTAxwZQBnFNaNbNiNfKQrZv@Base 12
+ _D3std3uni__T23switchUniformLowerBoundSQBl10functional__T9binaryFunVAyaa6_61203c3d2062VQta1_61VQBba1_62ZQBvTAxkTkZQDxFNaNbNiNfQskZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd11__xopEqualsMxFKxSQGkQGj__TQGiTQGgTwVmi1114112TQFiTQEeTQDhZQHoZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd6__initZ@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd9__xtoHashFNbNeKxSQGjQGi__TQGhTQGfTwVmi1114112TQFhTQEdTQDgZQHnZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd__T6__ctorZQiMFNaNbNcNiNeSQGrQGq__T10MultiArrayTSQHoQHn__TQGyTkVmi8ZQHiTSQImQIl__TQHwTkVmi14ZQIhTQItZQCoZSQJtQJs__TQJrTQJpTwVmi1114112TQIrTQHnTQGqZQKx@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQHbQHa__TQGzTQGxTwVmi1114112TQFzTQEvTQDyZQIf@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi7Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi7ZQCvZQFd__T7opIndexZQjMxFNaNbNiNewZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd11__xopEqualsMxFKxSQGkQGj__TQGiTQGgTwVmi1114112TQFiTQEeTQDhZQHoZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd6__initZ@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd9__xtoHashFNbNeKxSQGjQGi__TQGhTQGfTwVmi1114112TQFhTQEdTQDgZQHnZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd__T6__ctorZQiMFNaNbNcNiNeSQGrQGq__T10MultiArrayTSQHoQHn__TQGyTkVmi8ZQHiTSQImQIl__TQHwTkVmi13ZQIhTQItZQCoZSQJtQJs__TQJrTQJpTwVmi1114112TQIrTQHnTQGqZQKx@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQHbQHa__TQGzTQGxTwVmi1114112TQFzTQEvTQDyZQIf@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi8Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi8ZQCvZQFd__T7opIndexZQjMxFNaNbNiNewZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd11__xopEqualsMxFKxSQGkQGj__TQGiTQGgTwVmi1114112TQFiTQEeTQDhZQHoZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd6__initZ@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd9__xtoHashFNbNeKxSQGjQGi__TQGhTQGfTwVmi1114112TQFhTQEdTQDgZQHnZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd__T6__ctorZQiMFNaNbNcNiNeSQGrQGq__T10MultiArrayTSQHoQHn__TQGyTkVmi8ZQHiTSQImQIl__TQHwTkVmi12ZQIhTQItZQCoZSQJtQJs__TQJrTQJpTwVmi1114112TQIrTQHnTQGqZQKx@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQHbQHa__TQGzTQGxTwVmi1114112TQFzTQEvTQDyZQIf@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi13Vmi21ZQvTSQDoQDn__TQBiVmi9Vmi13ZQBvTSQEpQEo__TQCjVmi0Vmi9ZQCvZQFd__T7opIndexZQjMxFNaNbNiNewZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf11__xopEqualsMxFKxSQHmQHl__TQHkTQHiTwVmi1114112TQGkTQFgTQEiTQDlZQIuZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf6__initZ@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf9__xtoHashFNbNeKxSQHlQHk__TQHjTQHhTwVmi1114112TQGjTQFfTQEhTQDkZQItZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf__T6__ctorZQiMFNaNbNcNiNeSQHtQHs__T10MultiArrayTSQIqQIp__TQIaTkVmi7ZQIkTSQJoQJn__TQIyTkVmi11ZQJjTSQKnQKm__TQJxTkVmi15ZQKiTQKuZQDnZSQLuQLt__TQLsTQLqTwVmi1114112TQKsTQJoTQIqTQHtZQNc@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQIdQIc__TQIbTQHzTwVmi1114112TQHbTQFxTQEzTQEcZQJl@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi14Vmi21ZQvTSQDoQDn__TQBiVmi10Vmi14ZQBwTSQEqQEp__TQCkVmi6Vmi10ZQCxTSQFrQFq__TQDlVmi0Vmi6ZQDxZQGf__T7opIndexZQjMxFNaNbNiNewZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb11__xopEqualsMxFKxSQFiQFh__TQFgTQFeTwVmi1114112TQEgTQDdZQGiZb@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb6__initZ@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb9__xtoHashFNbNeKxSQFhQFg__TQFfTQFdTwVmi1114112TQEfTQDcZQGhZm@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb__T6__ctorZQiMFNaNbNcNiNeSQFpQFo__T10MultiArrayTSQGmQGl__TQFwTkVmi13ZQGhTQGtZQBqZSQHtQHs__TQHrTQHpTwVmi1114112TQGrTQFoZQIt@Base 12
+ _D3std3uni__T4TrieTSQsQq__T9BitPackedTbVmi1ZQrTwVmi1114112TSQCgQCf__T9sliceBitsVmi8Vmi21ZQuTSQDnQDm__TQBhVmi0Vmi8ZQBtZQEb__T7opIndexZQjMxFNaNbNiNewZb@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd11__xopEqualsMxFKxSQFkQFj__TQFiThTwVmi1114112TQFgTQEcTQDfZQGmZb@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd6__initZ@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd9__xtoHashFNbNeKxSQFjQFi__TQFhThTwVmi1114112TQFfTQEbTQDeZQGlZm@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T6__ctorZQiMFNaNbNcNiNeSQFrQFq__T10MultiArrayTSQGoQGn__T9BitPackedTkVmi8ZQrTSQHsQHr__TQBeTkVmi15ZQBpThZQCsZSQIxQIw__TQIvThTwVmi1114112TQItTQHpTQGsZQJz@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQGbQGa__TQFzThTwVmi1114112TQFxTQEtTQDwZQHd@Base 12
+ _D3std3uni__T4TrieThTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T7opIndexZQjMxFNaNbNiNewZh@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd11__xopEqualsMxFKxSQFkQFj__TQFiTtTwVmi1114112TQFgTQEcTQDfZQGmZb@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd6__initZ@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd9__xtoHashFNbNeKxSQFjQFi__TQFhTtTwVmi1114112TQFfTQEbTQDeZQGlZm@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd__T6__ctorZQiMFNaNbNcNiNeSQFrQFq__T10MultiArrayTSQGoQGn__T9BitPackedTkVmi8ZQrTSQHsQHr__TQBeTkVmi16ZQBpTtZQCsZSQIxQIw__TQIvTtTwVmi1114112TQItTQHpTQGsZQJz@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi5Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi5ZQCvZQEd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQGbQGa__TQFzTtTwVmi1114112TQFxTQEtTQDwZQHd@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd11__xopEqualsMxFKxSQFkQFj__TQFiTtTwVmi1114112TQFgTQEcTQDfZQGmZb@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd6__initZ@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd9__xtoHashFNbNeKxSQFjQFi__TQFhTtTwVmi1114112TQFfTQEbTQDeZQGlZm@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T6__ctorZQiMFNaNbNcNiNeSQFrQFq__T10MultiArrayTSQGoQGn__T9BitPackedTkVmi8ZQrTSQHsQHr__TQBeTkVmi15ZQBpTtZQCsZSQIxQIw__TQIvTtTwVmi1114112TQItTQHpTQGsZQJz@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQGbQGa__TQFzTtTwVmi1114112TQFxTQEtTQDwZQHd@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi13Vmi21ZQvTSQCoQCn__TQBiVmi6Vmi13ZQBvTSQDpQDo__TQCjVmi0Vmi6ZQCvZQEd__T7opIndexZQjMxFNaNbNiNewZt@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb11__xopEqualsMxFKxSQEiQEh__TQEgTtTwVmi1114112TQEeTQDbZQFgZb@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb6__initZ@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb9__xtoHashFNbNeKxSQEhQEg__TQEfTtTwVmi1114112TQEdTQDaZQFfZm@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb__T6__ctorZQiMFNaNbNcNiNeSQEpQEo__T10MultiArrayTSQFmQFl__T9BitPackedTkVmi12ZQsTtZQBuZSQGxQGw__TQGvTtTwVmi1114112TQGtTQFqZQHv@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb__T6__ctorZQiMxFNaNbNcNiNeAxmQdQfZxSQEzQEy__TQExTtTwVmi1114112TQEvTQDsZQFx@Base 12
+ _D3std3uni__T4TrieTtTwVmi1114112TSQBgQBf__T9sliceBitsVmi9Vmi21ZQuTSQCnQCm__TQBhVmi0Vmi9ZQBtZQDb__T7opIndexZQjMxFNaNbNiNewZt@Base 12
+ _D3std3uni__T4icmpTAxaTQeZQnFNaNbNiNfQsQuZi@Base 12
+ _D3std3uni__T4icmpTAxuTQeZQnFNaNbNiNfQsQuZi@Base 12
+ _D3std3uni__T4icmpTAxwTQeZQnFNaNbNiNfQsQuZi@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk11__xopEqualsMxFKxSQFrQFq__TQFpTQFmZQFxZb@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk3popMFNbNeZQEr@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk3topMFNaNbNcNdNiNfZQEz@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk4pushMFNaNbNfQEtZv@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk6__initZ@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T5StackTEQtQr__T16UnicodeSetParserTSQBu5regex8internal6parser__T6ParserTAyaTSQDjQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEk9__xtoHashFNbNeKxSQFqQFp__TQFoTQFlZQFwZm@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm11__xopEqualsMxFKxSQCtQCs__TQCrTQCoZQCzZb@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm3popMFNbNeZQBt@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm3topMFNaNbNcNdNiNfZQCb@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm4pushMFNaNbNfQBvZv@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm6__initZ@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm9__xtoHashFNbNeKxSQCsQCr__TQCqTQCnZQCyZm@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa11__xopEqualsMxFKxSQDhQDg__TQDfTQDcZQDnZb@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa3popMFNbNeZQCh@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa3topMFNaNbNcNdNiNfZQCp@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa4pushMFNaNbNfQCjZv@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa6__initZ@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa9__xtoHashFNbNeKxSQDgQDf__TQDeTQDbZQDmZm@Base 12
+ _D3std3uni__T5StackTkZQj11__xopEqualsMxFKxSQBpQBo__TQBnTkZQBtZb@Base 12
+ _D3std3uni__T5StackTkZQj3popMFNbNeZk@Base 12
+ _D3std3uni__T5StackTkZQj3topMFNaNbNcNdNiNfZk@Base 12
+ _D3std3uni__T5StackTkZQj4pushMFNaNbNfkZv@Base 12
+ _D3std3uni__T5StackTkZQj5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T5StackTkZQj6__initZ@Base 12
+ _D3std3uni__T5StackTkZQj6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T5StackTkZQj9__xtoHashFNbNeKxSQBoQBn__TQBmTkZQBsZm@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi11ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi11ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi12ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi12ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi13ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi13ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi14ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi14ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi15ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi15ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi16ZQsTQBdZQBoFNaNbNiNfQBtZQBx@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi16ZQsTmZQBmFNaNbNiNfmZQBt@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi7ZQrTQBcZQBnFNaNbNiNfQBsZQBw@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi7ZQrTmZQBlFNaNbNiNfmZQBs@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi8ZQrTQBcZQBnFNaNbNiNfQBsZQBw@Base 12
+ _D3std3uni__T5forceTSQtQr__T9BitPackedTkVmi8ZQrTmZQBlFNaNbNiNfmZQBs@Base 12
+ _D3std3uni__T5forceTkTiZQlFNaNbNiNfiZk@Base 12
+ _D3std3uni__T5sicmpTAxaTQeZQoFNaNbNiNfMQtMQwZi@Base 12
+ _D3std3uni__T5sicmpTAxuTQeZQoFNaNbNiNfMQtMQwZi@Base 12
+ _D3std3uni__T5sicmpTAxwTQeZQoFNaNbNiNfMQtMQwZi@Base 12
+ _D3std3uni__T6asTrieTbVii7Vii4Vii4Vii6ZQBaFNaNbNiNfMxSQCa8internal14unicode_tables__T9TrieEntryTbVii7Vii4Vii4Vii6ZQBdZxSQEoQEn__T4TrieTSQFeQFd__T9BitPackedTbVmi1ZQrTwVmi1114112TSQGuQGt__T9sliceBitsVmi14Vmi21ZQvTSQIcQIb__TQBiVmi10Vmi14ZQBwTSQJeQJd__TQCkVmi6Vmi10ZQCxTSQKfQKe__TQDlVmi0Vmi6ZQDxZQGh@Base 12
+ _D3std3uni__T6asTrieTbVii8Vii4Vii9ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryTbVii8Vii4Vii9ZQzZxSQEeQEd__T4TrieTSQEuQEt__T9BitPackedTbVmi1ZQrTwVmi1114112TSQGkQGj__T9sliceBitsVmi13Vmi21ZQvTSQHsQHr__TQBiVmi9Vmi13ZQBvTSQItQIs__TQCjVmi0Vmi9ZQCvZQFf@Base 12
+ _D3std3uni__T6asTrieTbVii8Vii5Vii8ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryTbVii8Vii5Vii8ZQzZxSQEeQEd__T4TrieTSQEuQEt__T9BitPackedTbVmi1ZQrTwVmi1114112TSQGkQGj__T9sliceBitsVmi13Vmi21ZQvTSQHsQHr__TQBiVmi8Vmi13ZQBvTSQItQIs__TQCjVmi0Vmi8ZQCvZQFf@Base 12
+ _D3std3uni__T6asTrieTbVii8Vii6Vii7ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryTbVii8Vii6Vii7ZQzZxSQEeQEd__T4TrieTSQEuQEt__T9BitPackedTbVmi1ZQrTwVmi1114112TSQGkQGj__T9sliceBitsVmi13Vmi21ZQvTSQHsQHr__TQBiVmi7Vmi13ZQBvTSQItQIs__TQCjVmi0Vmi7ZQCvZQFf@Base 12
+ _D3std3uni__T6asTrieThVii8Vii7Vii6ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryThVii8Vii7Vii6ZQzZxSQEeQEd__T4TrieThTwVmi1114112TSQFiQFh__T9sliceBitsVmi13Vmi21ZQvTSQGqQGp__TQBiVmi6Vmi13ZQBvTSQHrQHq__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni__T6asTrieTtVii12Vii9ZQtFNaNbNiNfMxSQBs8internal14unicode_tables__T9TrieEntryTtVii12Vii9ZQwZxSQDyQDx__T4TrieTtTwVmi1114112TSQFcQFb__T9sliceBitsVmi9Vmi21ZQuTSQGjQGi__TQBhVmi0Vmi9ZQBtZQDb@Base 12
+ _D3std3uni__T6asTrieTtVii8Vii7Vii6ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryTtVii8Vii7Vii6ZQzZxSQEeQEd__T4TrieTtTwVmi1114112TSQFiQFh__T9sliceBitsVmi13Vmi21ZQvTSQGqQGp__TQBiVmi6Vmi13ZQBvTSQHrQHq__TQCjVmi0Vmi6ZQCvZQEd@Base 12
+ _D3std3uni__T6asTrieTtVii8Vii8Vii5ZQwFNaNbNiNfMxSQBv8internal14unicode_tables__T9TrieEntryTtVii8Vii8Vii5ZQzZxSQEeQEd__T4TrieTtTwVmi1114112TSQFiQFh__T9sliceBitsVmi13Vmi21ZQvTSQGqQGp__TQBiVmi5Vmi13ZQBvTSQHrQHq__TQCjVmi0Vmi5ZQCvZQEd@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAaZQDyFNaNfQlZQo@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAxaZQDzFNaNfQmZQp@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAyaZQDzFNaNfQmZQp@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAyuZQDzFNaNfQmZQp@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAywZQDzFNaNbNfQoZQr@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toUpperIndexFNaNbNiNewZtVii1051S_DQCjQCi10toUpperTabFNaNbNiNemZwSQDo5ascii7toUpperTAyaZQDzFNaNfQmZQp@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toUpperIndexFNaNbNiNewZtVii1051S_DQCjQCi10toUpperTabFNaNbNiNemZwSQDo5ascii7toUpperTAyuZQDzFNaNfQmZQp@Base 12
+ _D3std3uni__T6toCaseS_DQvQt12toUpperIndexFNaNbNiNewZtVii1051S_DQCjQCi10toUpperTabFNaNbNiNemZwSQDo5ascii7toUpperTAywZQDzFNaNbNfQoZQr@Base 12
+ _D3std3uni__T7toLowerTAaZQmFNaNeNkMQnZQq@Base 12
+ _D3std3uni__T7toLowerTAxaZQnFNaNeNkMQoZQr@Base 12
+ _D3std3uni__T7toLowerTAyaZQnFNaNeNkMQoZQr@Base 12
+ _D3std3uni__T7toLowerTAyuZQnFNaNeNkMQoZQr@Base 12
+ _D3std3uni__T7toLowerTAywZQnFNaNbNeNkMQqZQt@Base 12
+ _D3std3uni__T7toUpperTAyaZQnFNaNeNkMQoZQr@Base 12
+ _D3std3uni__T7toUpperTAyuZQnFNaNeNkMQoZQr@Base 12
+ _D3std3uni__T7toUpperTAywZQnFNaNbNeNkMQqZQt@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf10__postblitMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf11__xopEqualsMxFKxSQCmQCl__TQCkTQCeZQCsZb@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf13opIndexAssignMFNaNbNiNfkmZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf16dupThisReferenceMFNaNbNiNfkZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf17freeThisReferenceMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf5reuseFNaNbNiNfAkZSQCmQCl__TQCkTQCeZQCs@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf6__dtorMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf6__initZ@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf6lengthMFNaNbNdNiNfmZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf7opSliceMFNaNbNiNfZAk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf7opSliceMFNaNbNiNfmmZAk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf7opSliceMxFNaNbNiNfZAxk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf7opSliceMxFNaNbNiNfmmZAxk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf8opAssignMFNaNbNcNiNjNeSQCrQCq__TQCpTQCjZQCxZQw@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf8refCountMFNaNbNdNiNfkZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf8refCountMxFNaNbNdNiNfZk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf9__xtoHashFNbNeKxSQClQCk__TQCjTQCdZQCrZm@Base 12
+ _D3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf__T8opEqualsZQkMxFNaNbNiNfKxSQCwQCv__TQCuTQCoZQDcZb@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz10__postblitMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz11__xopEqualsMxFKxSQCfQCe__TQCdTQBxZQClZb@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz13opIndexAssignMFNaNbNfkmZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz16dupThisReferenceMFNaNbNfkZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz17freeThisReferenceMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz5reuseFNaNbNfAkZSQCdQCc__TQCbTQBvZQCj@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6__dtorMFNaNbNiNfZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6__initZ@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6lengthMFNaNbNdNfmZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz7opSliceMFNaNbNfZAk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz7opSliceMFNaNbNfmmZAk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz7opSliceMxFNaNbNiNfZAxk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz7opSliceMxFNaNbNiNfmmZAxk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz8opAssignMFNaNbNcNiNjNeSQCkQCj__TQCiTQCcZQCqZQw@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz8refCountMFNaNbNdNiNfkZv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz8refCountMxFNaNbNdNiNfZk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz9__xtoHashFNbNeKxSQCeQCd__TQCcTQBwZQCkZm@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz__T6__ctorTAkZQlMFNaNbNcNfQpZSQCqQCp__TQCoTQCiZQCw@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz__T6__ctorTSQBy5range__T10roundRobinTSQCy9algorithm9iteration__T9MapResultSQEj10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQGnQGm21DecompressedIntervalsZQDtTSQHwQEyQEr__TQEkSQImQEd__TQDuVQDoa4_615b315dVQEda1_61ZQEwTQDpZQGhZQHzFQHqQCvZ6ResultZQJnMFNaNcNfQJqZSQLsQLr__TQLqTQLkZQLy@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz__T6appendZQiMFNaNbNfAkXv@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz__T7opIndexZQjMxFNaNbNiNfmZk@Base 12
+ _D3std3uni__T8CowArrayTSQwQu8GcPolicyZQz__T8opEqualsZQkMxFNaNbNiNfKxSQCpQCo__TQCnTQChZQCvZb@Base 12
+ _D3std3uni__T8spaceForVmi11ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi12ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi13ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi14ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi15ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi16ZQpFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi1ZQoFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi7ZQoFNaNbNiNfmZm@Base 12
+ _D3std3uni__T8spaceForVmi8ZQoFNaNbNiNfmZm@Base 12
+ _D3std3uni__T9BitPackedTbVmi1ZQr6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi11ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi12ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi13ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi14ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi15ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi16ZQs6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi7ZQr6__initZ@Base 12
+ _D3std3uni__T9BitPackedTkVmi8ZQr6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi5ZQt6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi5ZQt__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi5ZQt__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi6ZQt6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi6ZQt__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi6ZQt__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi7ZQt6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi7ZQt__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi7ZQt__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi8ZQt6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi8ZQt__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi8ZQt__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi9ZQt6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi9ZQt__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi0Vmi9ZQt__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi10Vmi14ZQv6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi10Vmi14ZQv__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi10Vmi14ZQv__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi13Vmi21ZQv6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi13Vmi21ZQv__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi13Vmi21ZQv__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi14Vmi21ZQv6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi14Vmi21ZQv__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi14Vmi21ZQv__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi5Vmi13ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi5Vmi13ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi5Vmi13ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi10ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi10ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi10ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi13ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi13ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi6Vmi13ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi7Vmi13ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi7Vmi13ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi7Vmi13ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi13ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi13ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi13ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi21ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi21ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi8Vmi21ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi13ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi13ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi13ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi21ZQu6__initZ@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi21ZQu__T6opCallTiZQkFNaNbNiNfiZi@Base 12
+ _D3std3uni__T9sliceBitsVmi9Vmi21ZQu__T6opCallTwZQkFNaNbNiNfwZk@Base 12
+ _D3std3uri10URI_EncodeFNaNfAywkZAya@Base 12
+ _D3std3uri11__moduleRefZ@Base 12
+ _D3std3uri12URIException6__initZ@Base 12
+ _D3std3uri12URIException6__vtblZ@Base 12
+ _D3std3uri12URIException7__ClassZ@Base 12
+ _D3std3uri12URIException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQCwQCvQCu@Base 12
+ _D3std3uri12URIException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCwQCvQCu@Base 12
+ _D3std3uri12__ModuleInfoZ@Base 12
+ _D3std3uri9ascii2hexFNaNbNiNfwZk@Base 12
+ _D3std3uri9hex2asciiyG16a@Base 12
+ _D3std3uri9uri_flagsyG128h@Base 12
+ _D3std3uri9urlEncodeFNaNfMHAyaQdZQg@Base 12
+ _D3std3uri__T15encodeComponentTaZQuFNaNfMAxaZAya@Base 12
+ _D3std3utf10strideImplFNaNeamZk@Base 12
+ _D3std3utf11__moduleRefZ@Base 12
+ _D3std3utf12UTFException11setSequenceMFNaNbNiNjNfMAkXCQCaQBzQBy@Base 12
+ _D3std3utf12UTFException6__ctorMFNaNbNfAyamQemC6object9ThrowableZCQCmQClQCk@Base 12
+ _D3std3utf12UTFException6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCnQCmQCl@Base 12
+ _D3std3utf12UTFException6__initZ@Base 12
+ _D3std3utf12UTFException6__vtblZ@Base 12
+ _D3std3utf12UTFException7__ClassZ@Base 12
+ _D3std3utf12UTFException8toStringMxFZAya@Base 12
+ _D3std3utf12__ModuleInfoZ@Base 12
+ _D3std3utf12isValidDcharFNaNbNiNfwZb@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFNaNbNiNfQoZSQBqQBp__TQBoTQBfZQBwFQBnZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl11__xopEqualsMxFKxSQCqQCp__TQCoTQCfZQCwFQCnZQCiZb@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl4backMNgFNaNbNcNdNiNfZNga@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl4saveMFNaNbNdNiNfZSQCqQCp__TQCoTQCfZQCwFQCnZQCi@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl5frontMNgFNaNbNcNdNiNfZNga@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl6__initZ@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl7opIndexMNgFNaNbNcNiNfmZNga@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl7opSliceMFNaNbNiNfmmZSQCtQCs__TQCrTQCiZQCzFQCqZQCl@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl9__xtoHashFNbNeKxSQCpQCo__TQCnTQCeZQCvFQCmZQChZm@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFNaNbNiNfQpZSQBrQBq__TQBpTQBgZQBxFQBoZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl11__xopEqualsMxFKxSQCrQCq__TQCpTQCgZQCxFQCoZQCiZb@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl4backMNgFNaNbNcNdNiNfZNgxa@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl4saveMFNaNbNdNiNfZSQCrQCq__TQCpTQCgZQCxFQCoZQCi@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl5frontMNgFNaNbNcNdNiNfZNgxa@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl7opIndexMNgFNaNbNcNiNfmZNgxa@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl7opSliceMFNaNbNiNfmmZSQCuQCt__TQCsTQCjZQDaFQCrZQCl@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl9__xtoHashFNbNeKxSQCqQCp__TQCoTQCfZQCwFQCnZQChZm@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFNaNbNiNfQpZSQBrQBq__TQBpTQBgZQBxFQBoZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl11__xopEqualsMxFKxSQCrQCq__TQCpTQCgZQCxFQCoZQCiZb@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl4backMNgFNaNbNcNdNiNfZNgxu@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl4saveMFNaNbNdNiNfZSQCrQCq__TQCpTQCgZQCxFQCoZQCi@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl5frontMNgFNaNbNcNdNiNfZNgxu@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl7opIndexMNgFNaNbNcNiNfmZNgxu@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl7opSliceMFNaNbNiNfmmZSQCuQCt__TQCsTQCjZQDaFQCrZQCl@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl9__xtoHashFNbNeKxSQCqQCp__TQCoTQCfZQCwFQCnZQChZm@Base 12
+ _D3std3utf__T10byCodeUnitTAxwZQrFNaNbNiNfQpZQs@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFNaNbNiNfQpZSQBrQBq__TQBpTQBgZQBxFQBoZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl11__xopEqualsMxFKxSQCrQCq__TQCpTQCgZQCxFQCoZQCiZb@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl4backMNgFNaNbNcNdNiNfZya@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl4saveMFNaNbNdNiNfZSQCrQCq__TQCpTQCgZQCxFQCoZQCi@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl5frontMNgFNaNbNcNdNiNfZya@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl7opIndexMNgFNaNbNcNiNfmZya@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl7opSliceMFNaNbNiNfmmZSQCuQCt__TQCsTQCjZQDaFQCrZQCl@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl9__xtoHashFNbNeKxSQCqQCp__TQCoTQCfZQCwFQCnZQChZm@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFNaNbNiNfQpZSQBrQBq__TQBpTQBgZQBxFQBoZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl11__xopEqualsMxFKxSQCrQCq__TQCpTQCgZQCxFQCoZQCiZb@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl4backMNgFNaNbNcNdNiNfZyu@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl4saveMFNaNbNdNiNfZSQCrQCq__TQCpTQCgZQCxFQCoZQCi@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl5frontMNgFNaNbNcNdNiNfZyu@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl7opIndexMNgFNaNbNcNiNfmZyu@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl7opSliceMFNaNbNiNfmmZSQCuQCt__TQCsTQCjZQDaFQCrZQCl@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl9__xtoHashFNbNeKxSQCqQCp__TQCoTQCfZQCwFQCnZQChZm@Base 12
+ _D3std3utf__T10byCodeUnitTAywZQrFNaNbNiNfQpZQs@Base 12
+ _D3std3utf__T10byCodeUnitTSQz4path__T16asNormalizedPathTSQCd5range__T5chainTSQCxQCw__TQCvTAyaZQDdFQiZ14ByCodeUnitImplTSQEnQCk__T10OnlyResultTaZQpTQCsZQDdFQDaQBnQDgZ6ResultZQFfFNkMQEtZQtZQGrFNaNbNiNfQGqZQGu@Base 12
+ _D3std3utf__T10byCodeUnitTSQz5range__T5chainTSQBsQBr__TQBqTAxaZQByFQiZ14ByCodeUnitImplTSQDiQCk__T10OnlyResultTaZQpTQCsZQDdFQDaQBnQDgZ6ResultZQEyFNaNbNiNfQExZQFb@Base 12
+ _D3std3utf__T10byCodeUnitTSQzQx__TQvTAaZQBbFQhZ14ByCodeUnitImplZQBzFNaNbNiNfQByZQCc@Base 12
+ _D3std3utf__T10byCodeUnitTSQzQx__TQvTAxaZQBcFQiZ14ByCodeUnitImplZQCaFNaNbNiNfQBzZQCd@Base 12
+ _D3std3utf__T10byCodeUnitTSQzQx__TQvTAyaZQBcFQiZ14ByCodeUnitImplZQCaFNaNbNiNfQBzZQCd@Base 12
+ _D3std3utf__T10codeLengthTaZQpFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTuZQpFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTwZQpFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTxaZQqFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTyaZQqFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTyuZQqFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10codeLengthTywZQqFNaNbNiNfwZh@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFfFNaNfKQCgJmZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFfFNaNfKQCgZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFgFNaNfKQChJmZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFgFNaNfKQChZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFgFNaNbNiNfKQClJmZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFgFNaNbNiNfKQClZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFgFNaNbNiNfKQClJmZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFgFNaNbNiNfKQClZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTQCxZQrFQDeZ14ByCodeUnitImplZQFhFNaNfKQCiJmZw@Base 12
+ _D3std3utf__T10decodeBackVEQz8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDuQDt__T10byCodeUnitTQCxZQrFQDeZ14ByCodeUnitImplZQFhFNaNfKQCiZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxaZQDrFKQjKmZ__T9exceptionTQBcZQpFNaNbNfQBpQDsZCQFvQFu12UTFException@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxaZQDrFNaKQlKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxaZQDrFNaQkKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxaZQDrFQiKmZ__T9exceptionTQBbZQpFNaNbNfQBoQDrZCQFuQFt12UTFException@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxuZQDrFNaKQlKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxuZQDrFNaQkKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxwZQDrFNaKQlKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxwZQDrFNaQkKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxaZQDrFNaNbNiKQpKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxaZQDrFNaNbNiQoKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxuZQDrFNaNbNiKQpKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxwZQDrFNaNbNiKQpKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDzQDy__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFkFNaNbNiNfKQCkKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDzQDy__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFlFNaNbNiNfKQClKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDzQDy__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFlFNaNbNiNfKQClKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDzQDy__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFlFNaNbNiNfKQClKmZw@Base 12
+ _D3std3utf__T10decodeImplVbi1VEQBd8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDzQDy__T10byCodeUnitTQCxZQrFQDeZ14ByCodeUnitImplZQFmFNaNbNiNfKQCmKmZw@Base 12
+ _D3std3utf__T10strideBackTANgaZQsFNaNfKQnmZk@Base 12
+ _D3std3utf__T10strideBackTAxaZQrFNaNfKQmmZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCiFNaNfKQCeZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCiFNaNfKQCemZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCjFNaNfKQCfZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCjFNaNfKQCfmZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfKQCjZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCjFNaNfKQCfZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCjFNaNfKQCfmZk@Base 12
+ _D3std3utf__T10strideBackTSQzQx__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfKQCjZk@Base 12
+ _D3std3utf__T10toUTFzImplTPaTAxaZQuFNaNbNfNkMQqZQw@Base 12
+ _D3std3utf__T10toUTFzImplTPaTAyaZQuFNaNbNfNkMQqZQw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAaZQDnFNaNeMKQnJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAaZQDnFNaNfKQmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAwZQDnFNaNeMKQnJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAwZQDnFNaNfKQmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxaZQDoFNaNbNiNeMKQsJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxaZQDoFNaNbNiNfKQrZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxuZQDoFNaNbNiNeMKQsJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxuZQDoFNaNbNiNfKQrZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxwZQDoFNaNbNiNeMKQsJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TAxwZQDoFNaNbNiNfKQrZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFhFNaNbNiNfKQCkJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFhFNaNbNiNfKQCkZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFiFNaNbNiNfKQClZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTQCxZQrFQDeZ14ByCodeUnitImplZQFjFNaNbNiNfKQCmJmZw@Base 12
+ _D3std3utf__T11decodeFrontVEQBa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TSQDwQDv__T10byCodeUnitTQCxZQrFQDeZ14ByCodeUnitImplZQFjFNaNbNiNfKQCmZw@Base 12
+ _D3std3utf__T13_utfExceptionVEQBc8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0ZQDmFNaNfQCiwZw@Base 12
+ _D3std3utf__T13_utfExceptionVEQBc8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1ZQDmFNaNbNiNfQCmwZw@Base 12
+ _D3std3utf__T20canSearchInCodeUnitsTaZQzFNaNbNiNfwZb@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAaZQDoFNaNbNiNfQpZSQEpQEo__T10byCodeUnitTQBpZQrFQBwZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAxaZQDpFNaNbNiNfQqZSQEqQEp__T10byCodeUnitTQBqZQrFQBxZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTQChZQDpFNaNbNiNfQCxZSQErQEq__T10byCodeUnitTQDyZQrFQEfZ14ByCodeUnitImpl@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx4path__T16asNormalizedPathTSQFc5range__T5chainTSQFwQFv__T10byCodeUnitTQFdZQrFQFkZ14ByCodeUnitImplTSQHvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQtZQJzFNaNbNiNfQHaZQHe@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx5range__T5chainTSQErQEq__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQGpQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6ResultZQIfFNaNbNiNfQFgZQFk@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNaNbNiNfQFyZSQJzQJy__TQJxTaVQJui1Z__TQKmTQHfZQKuFNcQHpZQDc@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf11__xopEqualsMxFKxSQKoQKn__TQKmTaVQKji1Z__TQLbTQHuZQLjFNcQIeZQDrZb@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf4saveMFNaNbNdNiNfZSQKoQKn__TQKmTaVQKji1Z__TQLbTQHuZQLjFNcQIeZQDr@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf5frontMFNaNbNdNiNlNfZa@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf6__ctorMFNaNbNcNiNfNkMQGvZSQKwQKv__TQKuTaVQKri1Z__TQLjTQIcZQLrFNcQImZQDz@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf6__ctorMFNaNbNcNiNfNkMQGvttG4aZSQLbQLa__TQKzTaVQKwi1Z__TQLoTQIhZQLwFNcQIrZQEe@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf6__initZ@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDx6string__T14rightJustifierTSQFcQFb__TQFaTwVQExi1Z__TQFpTSQGfQGe__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHsFNcQCfZ6ResultZQElFQDymwZQsZQIxFNcQFsZQBf9__xtoHashFNbNeKxSQKnQKm__TQKlTaVQKii1Z__TQLaTQHtZQLiFNcQIdZQDqZm@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNaNbNiNfQCjZQCn@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNaNbNiNfQCkZQCo@Base 12
+ _D3std3utf__T5byUTFTaVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNaNbNiNfQClZQCp@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAaZQDoFNaNbNiNfQpZSQEpQEo__TQEnTwVQEki1Z__TQFcTSQFsQFr__T10byCodeUnitTQCsZQrFQCzZ14ByCodeUnitImplZQHfFNcQCfZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAxaZQDpFNaNbNiNfQqZSQEqQEp__TQEoTwVQEli1Z__TQFdTSQFtQFs__T10byCodeUnitTQCtZQrFQDaZ14ByCodeUnitImplZQHgFNcQCfZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAxuZQDpFNaNbNiNfQqZSQEqQEp__TQEoTwVQEli1Z__TQFdTSQFtQFs__T10byCodeUnitTQCtZQrFQDaZ14ByCodeUnitImplZQHgFNcQCfZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAxwZQDpFNaNbNiNfQqZQt@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAyuZQDpFNaNbNiNfQqZSQEqQEp__TQEoTwVQEli1Z__TQFdTSQFtQFs__T10byCodeUnitTQCtZQrFQDaZ14ByCodeUnitImplZQHgFNcQCfZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTAywZQDpFNaNbNiNfQqZQt@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTQChZQDpFNaNbNiNfQCxZSQErQEq__TQEpTwVQEmi1Z__TQFeTSQFuQFt__T10byCodeUnitTQFbZQrFQFiZ14ByCodeUnitImplZQHhFNcQCfZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNaNbNiNfQCjZSQGkQGj__TQGiTwVQGfi1Z__TQGxTQDqZQHfFNcQEaZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result11__xopEqualsMxFKxSQHdQHc__TQHbTwVQGyi1Z__TQHqTQEjZQHyFNcQEtZQCqZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result4backMFNaNdNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result4saveMFNaNbNdNiNfZSQHdQHc__TQHbTwVQGyi1Z__TQHqTQEjZQHyFNcQEtZQCq@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result5frontMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result6__ctorMFNaNbNcNiNfNkMQDkZSQHlQHk__TQHjTwVQHgi1Z__TQHyTQErZQIgFNcQFbZQCy@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result6__ctorMFNaNbNcNiNfNkMQDkkZSQHmQHl__TQHkTwVQHhi1Z__TQHzTQEsZQIhFNcQFcZQCz@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result6__ctorMFNaNbNcNiNfNkMQDkkkZSQHnQHm__TQHlTwVQHii1Z__TQIaTQEtZQIiFNcQFdZQDa@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result6__initZ@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result7popBackMFNaNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQFiFNcQCdZ6Result9__xtoHashFNbNeKxSQHcQHb__TQHaTwVQGxi1Z__TQHpTQEiZQHxFNcQEsZQCpZm@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNaNbNiNfQCkZSQGlQGk__TQGjTwVQGgi1Z__TQGyTQDrZQHgFNcQEbZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result11__xopEqualsMxFKxSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCqZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4backMFNaNdNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4saveMFNaNbNdNiNfZSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCq@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5frontMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlZSQHmQHl__TQHkTwVQHhi1Z__TQHzTQEsZQIhFNcQFcZQCy@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkZSQHnQHm__TQHlTwVQHii1Z__TQIaTQEtZQIiFNcQFdZQCz@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkkZSQHoQHn__TQHmTwVQHji1Z__TQIbTQEuZQIjFNcQFeZQDa@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result7popBackMFNaNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result9__xtoHashFNbNeKxSQHdQHc__TQHbTwVQGyi1Z__TQHqTQEjZQHyFNcQEtZQCpZm@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNaNbNiNfQCkZSQGlQGk__TQGjTwVQGgi1Z__TQGyTQDrZQHgFNcQEbZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result11__xopEqualsMxFKxSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCqZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4backMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4saveMFNaNbNdNiNfZSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCq@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5frontMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlZSQHmQHl__TQHkTwVQHhi1Z__TQHzTQEsZQIhFNcQFcZQCy@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkZSQHnQHm__TQHlTwVQHii1Z__TQIaTQEtZQIiFNcQFdZQCz@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkkZSQHoQHn__TQHmTwVQHji1Z__TQIbTQEuZQIjFNcQFeZQDa@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result9__xtoHashFNbNeKxSQHdQHc__TQHbTwVQGyi1Z__TQHqTQEjZQHyFNcQEtZQCpZm@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNaNbNiNfQCkZSQGlQGk__TQGjTwVQGgi1Z__TQGyTQDrZQHgFNcQEbZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result11__xopEqualsMxFKxSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCqZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4backMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result4saveMFNaNbNdNiNfZSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCq@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result5frontMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlZSQHmQHl__TQHkTwVQHhi1Z__TQHzTQEsZQIhFNcQFcZQCy@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkZSQHnQHm__TQHlTwVQHii1Z__TQIaTQEtZQIiFNcQFdZQCz@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__ctorMFNaNbNcNiNfNkMQDlkkZSQHoQHn__TQHmTwVQHji1Z__TQIbTQEuZQIjFNcQFeZQDa@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result6__initZ@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQFjFNcQCeZ6Result9__xtoHashFNbNeKxSQHdQHc__TQHbTwVQGyi1Z__TQHqTQEjZQHyFNcQEtZQCpZm@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNaNbNiNfQClZSQGmQGl__TQGkTwVQGhi1Z__TQGzTQDsZQHhFNcQEcZ6Result@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result11__xopEqualsMxFKxSQHfQHe__TQHdTwVQHai1Z__TQHsTQElZQIaFNcQEvZQCqZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result4backMFNaNdNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result4saveMFNaNbNdNiNfZSQHfQHe__TQHdTwVQHai1Z__TQHsTQElZQIaFNcQEvZQCq@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result5frontMFNaNbNdNiNlNfZw@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__ctorMFNaNbNcNiNfNkMQDmZSQHnQHm__TQHlTwVQHii1Z__TQIaTQEtZQIiFNcQFdZQCy@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__ctorMFNaNbNcNiNfNkMQDmkZSQHoQHn__TQHmTwVQHji1Z__TQIbTQEuZQIjFNcQFeZQCz@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__ctorMFNaNbNcNiNfNkMQDmkkZSQHpQHo__TQHnTwVQHki1Z__TQIcTQEvZQIkFNcQFfZQDa@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result6__initZ@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result7popBackMFNaNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std3utf__T5byUTFTwVEQv8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDhTSQDxQDw__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFkFNcQCfZ6Result9__xtoHashFNbNeKxSQHeQHd__TQHcTwVQGzi1Z__TQHrTQEkZQHzFNcQEuZQCpZm@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAaZQDgFNaNeMKQnKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAuZQDgFNaNeMKQnKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAwZQDgFNaNeMKQnKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxaZQDhFNaNeMKQoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TAxuZQDhFNaNeMKQoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TQCaZQDhFNaNeMKQCoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TxAaZQDhFNaNeMKxQoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TxAuZQDhFNaNeMKxQoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0TxAwZQDhFNaNeMKxQoKmZw@Base 12
+ _D3std3utf__T6decodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1TQCaZQDhFNaNbNiNeMKQCsKmZw@Base 12
+ _D3std3utf__T6encodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0ZQDdFNaNfJG1wwZm@Base 12
+ _D3std3utf__T6encodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0ZQDdFNaNfJG2uwZm@Base 12
+ _D3std3utf__T6encodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0ZQDdFNaNfJG4awZm@Base 12
+ _D3std3utf__T6encodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai0ZQDdFNaNfMKAawZv@Base 12
+ _D3std3utf__T6encodeVEQu8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1ZQDdFNaNbNiNfJG4awZm@Base 12
+ _D3std3utf__T6strideTAaZQlFNaNfQkZk@Base 12
+ _D3std3utf__T6strideTAxaZQmFNaNfQlmZk@Base 12
+ _D3std3utf__T6strideTAyaZQmFNaNfKQmmZk@Base 12
+ _D3std3utf__T6toUTFzTPaZ__TQoTAyaZQvFNaNbNfQnZQz@Base 12
+ _D3std3utf__T7toUTF32TAxaZQnFNaNbNfMQoZAyw@Base 12
+ _D3std3utf__T7toUTF32TAyaZQnFNaNbNfMQoZAyw@Base 12
+ _D3std3utf__T9toUTFImplTAywTAxaZQtFNaNbNfMQoZQv@Base 12
+ _D3std3utf__T9toUTFImplTAywTAyaZQtFNaNbNfMQoZQv@Base 12
+ _D3std3xml10DigitTableyAi@Base 12
+ _D3std3xml10checkCharsFNaNfKAyaZv@Base 12
+ _D3std3xml10checkSpaceFNaNfKAyaZv@Base 12
+ _D3std3xml10isBaseCharFNaNbNiNfwZb@Base 12
+ _D3std3xml10isExtenderFNaNbNiNfwZb@Base 12
+ _D3std3xml11PIException6__ctorMFNaNfAyaZCQBnQBmQBl@Base 12
+ _D3std3xml11PIException6__initZ@Base 12
+ _D3std3xml11PIException6__vtblZ@Base 12
+ _D3std3xml11PIException7__ClassZ@Base 12
+ _D3std3xml11XIException6__ctorMFNaNfAyaZCQBnQBmQBl@Base 12
+ _D3std3xml11XIException6__initZ@Base 12
+ _D3std3xml11XIException6__vtblZ@Base 12
+ _D3std3xml11XIException7__ClassZ@Base 12
+ _D3std3xml11__moduleRefZ@Base 12
+ _D3std3xml11checkCDSectFNaNfKAyaZv@Base 12
+ _D3std3xml11checkPrologFNaNfKAyaZv@Base 12
+ _D3std3xml11checkSDDeclFNaNfKAyaZv@Base 12
+ _D3std3xml12TagException6__ctorMFNaNfAyaZCQBoQBnQBm@Base 12
+ _D3std3xml12TagException6__initZ@Base 12
+ _D3std3xml12TagException6__vtblZ@Base 12
+ _D3std3xml12TagException7__ClassZ@Base 12
+ _D3std3xml12XMLException6__ctorMFNaNfAyaZCQBoQBnQBm@Base 12
+ _D3std3xml12XMLException6__initZ@Base 12
+ _D3std3xml12XMLException6__vtblZ@Base 12
+ _D3std3xml12XMLException7__ClassZ@Base 12
+ _D3std3xml12__ModuleInfoZ@Base 12
+ _D3std3xml12checkCharRefFNaNfKAyaJwZv@Base 12
+ _D3std3xml12checkCommentFNaNfKAyaZv@Base 12
+ _D3std3xml12checkContentFNaNfKAyaZv@Base 12
+ _D3std3xml12checkElementFNaNfKAyaZv@Base 12
+ _D3std3xml12checkEncNameFNaNfKAyaZv@Base 12
+ _D3std3xml12checkLiteralFNaNfAyaKQeZv@Base 12
+ _D3std3xml12checkXMLDeclFNaNfKAyaZv@Base 12
+ _D3std3xml12requireOneOfFNaNfKAyaQdZa@Base 12
+ _D3std3xml13BaseCharTableyAi@Base 12
+ _D3std3xml13ElementParser3tagMxFNaNbNdNiNfZxCQBrQBq3Tag@Base 12
+ _D3std3xml13ElementParser4onPIMFNaNbNdNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ElementParser4onXIMFNaNbNdNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ElementParser5parseMFZv@Base 12
+ _D3std3xml13ElementParser6__ctorMFNaNbNiNfCQBpQBo3TagPAyaZCQCfQCeQCd@Base 12
+ _D3std3xml13ElementParser6__ctorMFNaNbNiNfCQBpQBoQBnZQl@Base 12
+ _D3std3xml13ElementParser6__ctorMFNaNbNiNfZCQBqQBpQBo@Base 12
+ _D3std3xml13ElementParser6__initZ@Base 12
+ _D3std3xml13ElementParser6__vtblZ@Base 12
+ _D3std3xml13ElementParser6onTextMFNaNbNdNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ElementParser7__ClassZ@Base 12
+ _D3std3xml13ElementParser7onCDataMFNaNbNdNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ElementParser8toStringMxFNaNbNiNfZAya@Base 12
+ _D3std3xml13ElementParser9onCommentMFNaNbNdNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ElementParser9onTextRawMFNaNbNiNfDFAyaZvZv@Base 12
+ _D3std3xml13ExtenderTableyAi@Base 12
+ _D3std3xml13TextException6__ctorMFNaNfAyaZCQBpQBoQBn@Base 12
+ _D3std3xml13TextException6__initZ@Base 12
+ _D3std3xml13TextException6__vtblZ@Base 12
+ _D3std3xml13TextException7__ClassZ@Base 12
+ _D3std3xml13checkAttValueFNaNfKAyaZv@Base 12
+ _D3std3xml13checkCharDataFNaNfKAyaZv@Base 12
+ _D3std3xml13checkDocumentFNaNfKAyaZv@Base 12
+ _D3std3xml13isIdeographicFNaNbNiNfwZb@Base 12
+ _D3std3xml14CDataException6__ctorMFNaNfAyaZCQBqQBpQBo@Base 12
+ _D3std3xml14CDataException6__initZ@Base 12
+ _D3std3xml14CDataException6__vtblZ@Base 12
+ _D3std3xml14CDataException7__ClassZ@Base 12
+ _D3std3xml14CheckException6__ctorMFNaNfAyaQdCQBrQBqQBpZQl@Base 12
+ _D3std3xml14CheckException6__initZ@Base 12
+ _D3std3xml14CheckException6__vtblZ@Base 12
+ _D3std3xml14CheckException7__ClassZ@Base 12
+ _D3std3xml14CheckException8completeMFNaNfAyaZv@Base 12
+ _D3std3xml14CheckException8toStringMxFNaNfZAya@Base 12
+ _D3std3xml14DocumentParser6__ctorMFAyaZCQBmQBlQBk@Base 12
+ _D3std3xml14DocumentParser6__initZ@Base 12
+ _D3std3xml14DocumentParser6__vtblZ@Base 12
+ _D3std3xml14DocumentParser7__ClassZ@Base 12
+ _D3std3xml14XMLInstruction10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml14XMLInstruction5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml14XMLInstruction6__ctorMFNaNfAyaZCQBqQBpQBo@Base 12
+ _D3std3xml14XMLInstruction6__initZ@Base 12
+ _D3std3xml14XMLInstruction6__vtblZ@Base 12
+ _D3std3xml14XMLInstruction6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml14XMLInstruction7__ClassZ@Base 12
+ _D3std3xml14XMLInstruction8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml14XMLInstruction8toStringMxFNaNbNlNfZAya@Base 12
+ _D3std3xml14checkAttributeFNaNfKAyaZv@Base 12
+ _D3std3xml14checkEntityRefFNaNfKAyaZv@Base 12
+ _D3std3xml14checkReferenceFNaNfKAyaZv@Base 12
+ _D3std3xml15DecodeException6__ctorMFNaNfAyaZCQBrQBqQBp@Base 12
+ _D3std3xml15DecodeException6__initZ@Base 12
+ _D3std3xml15DecodeException6__vtblZ@Base 12
+ _D3std3xml15DecodeException7__ClassZ@Base 12
+ _D3std3xml15checkVersionNumFNaNfKAyaZv@Base 12
+ _D3std3xml15isCombiningCharFNaNbNiNfwZb@Base 12
+ _D3std3xml16CommentException6__ctorMFNaNfAyaZCQBsQBrQBq@Base 12
+ _D3std3xml16CommentException6__initZ@Base 12
+ _D3std3xml16CommentException6__vtblZ@Base 12
+ _D3std3xml16CommentException7__ClassZ@Base 12
+ _D3std3xml16IdeographicTableyAi@Base 12
+ _D3std3xml16checkDocTypeDeclFNaNfKAyaZv@Base 12
+ _D3std3xml16checkVersionInfoFNaNfKAyaZv@Base 12
+ _D3std3xml17checkEncodingDeclFNaNfKAyaZv@Base 12
+ _D3std3xml18CombiningCharTableyAi@Base 12
+ _D3std3xml20InvalidTypeException6__ctorMFNaNfAyaZCQBwQBvQBu@Base 12
+ _D3std3xml20InvalidTypeException6__initZ@Base 12
+ _D3std3xml20InvalidTypeException6__vtblZ@Base 12
+ _D3std3xml20InvalidTypeException7__ClassZ@Base 12
+ _D3std3xml21ProcessingInstruction10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml21ProcessingInstruction5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml21ProcessingInstruction6__ctorMFNaNfAyaZCQBxQBwQBv@Base 12
+ _D3std3xml21ProcessingInstruction6__initZ@Base 12
+ _D3std3xml21ProcessingInstruction6__vtblZ@Base 12
+ _D3std3xml21ProcessingInstruction6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml21ProcessingInstruction7__ClassZ@Base 12
+ _D3std3xml21ProcessingInstruction8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml21ProcessingInstruction8toStringMxFNaNbNlNfZAya@Base 12
+ _D3std3xml3Tag11__invariantMxFZv@Base 12
+ _D3std3xml3Tag11toEndStringMxFNfZAya@Base 12
+ _D3std3xml3Tag12__invariant0MxFZv@Base 12
+ _D3std3xml3Tag13toEmptyStringMxFNfZAya@Base 12
+ _D3std3xml3Tag13toStartStringMxFNfZAya@Base 12
+ _D3std3xml3Tag14toNonEndStringMxFNfZAya@Base 12
+ _D3std3xml3Tag5isEndMxFNaNbNdNiNfZb@Base 12
+ _D3std3xml3Tag5opCmpMxFC6ObjectZi@Base 12
+ _D3std3xml3Tag6__ctorMFNaNfAyaEQBdQBc7TagTypeZCQBtQBsQBr@Base 12
+ _D3std3xml3Tag6__ctorMFNaNfKAyabZCQBgQBfQBe@Base 12
+ _D3std3xml3Tag6__initZ@Base 12
+ _D3std3xml3Tag6__vtblZ@Base 12
+ _D3std3xml3Tag6toHashMxFNbNfZm@Base 12
+ _D3std3xml3Tag7__ClassZ@Base 12
+ _D3std3xml3Tag7isEmptyMxFNaNbNdNiNfZb@Base 12
+ _D3std3xml3Tag7isStartMxFNaNbNdNiNfZb@Base 12
+ _D3std3xml3Tag8opEqualsMxFMC6ObjectZb@Base 12
+ _D3std3xml3Tag8toStringMxFNfZAya@Base 12
+ _D3std3xml4Item6__initZ@Base 12
+ _D3std3xml4Item6__vtblZ@Base 12
+ _D3std3xml4Item6prettyMxFNlNfkZAAya@Base 12
+ _D3std3xml4Item7__ClassZ@Base 12
+ _D3std3xml4Text10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml4Text5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml4Text6__ctorMFNaNfAyaZCQBfQBeQBd@Base 12
+ _D3std3xml4Text6__initZ@Base 12
+ _D3std3xml4Text6__vtblZ@Base 12
+ _D3std3xml4Text6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml4Text7__ClassZ@Base 12
+ _D3std3xml4Text8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml4Text8toStringMxFNaNbNiNlNfZAya@Base 12
+ _D3std3xml4chopFNaNbNfKAyamZQf@Base 12
+ _D3std3xml4exitFAyaZv@Base 12
+ _D3std3xml4optcFNaNbNfKAyaaZb@Base 12
+ _D3std3xml4reqcFNaNfKAyaaZv@Base 12
+ _D3std3xml5CData10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml5CData5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml5CData6__ctorMFNaNfAyaZCQBgQBfQBe@Base 12
+ _D3std3xml5CData6__initZ@Base 12
+ _D3std3xml5CData6__vtblZ@Base 12
+ _D3std3xml5CData6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml5CData7__ClassZ@Base 12
+ _D3std3xml5CData8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml5CData8toStringMxFNaNbNlNfZAya@Base 12
+ _D3std3xml5checkFNaNfAyaZv@Base 12
+ _D3std3xml6decodeFNaNfAyaEQyQw10DecodeModeZQv@Base 12
+ _D3std3xml6isCharFNaNbNiNfwZb@Base 12
+ _D3std3xml6lookupFNaNbNiNfAxiiZb@Base 12
+ _D3std3xml7Comment10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml7Comment5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml7Comment6__ctorMFNaNfAyaZCQBiQBhQBg@Base 12
+ _D3std3xml7Comment6__initZ@Base 12
+ _D3std3xml7Comment6__vtblZ@Base 12
+ _D3std3xml7Comment6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml7Comment7__ClassZ@Base 12
+ _D3std3xml7Comment8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml7Comment8toStringMxFNaNbNlNfZAya@Base 12
+ _D3std3xml7Element10appendItemMFNaNfCQBjQBi4ItemZv@Base 12
+ _D3std3xml7Element10isEmptyXMLMxFNaNbNdNiNlNfZb@Base 12
+ _D3std3xml7Element4textMxFEQzQx10DecodeModeZAya@Base 12
+ _D3std3xml7Element5opCmpMxFNfMxC6ObjectZi@Base 12
+ _D3std3xml7Element5parseMFCQzQx13ElementParserZv@Base 12
+ _D3std3xml7Element6__ctorMFNaNfAyaQdZCQBkQBjQBi@Base 12
+ _D3std3xml7Element6__ctorMFNaNfxCQBfQBe3TagZCQBrQBqQBp@Base 12
+ _D3std3xml7Element6__initZ@Base 12
+ _D3std3xml7Element6__vtblZ@Base 12
+ _D3std3xml7Element6prettyMxFNlNfkZAAya@Base 12
+ _D3std3xml7Element6toHashMxFNbNlNfZm@Base 12
+ _D3std3xml7Element7__ClassZ@Base 12
+ _D3std3xml7Element8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml7Element8toStringMxFNlNfZAya@Base 12
+ _D3std3xml7Element__T10opOpAssignVAyaa1_7eZQwMFNaNfCQByQBx21ProcessingInstructionZv@Base 12
+ _D3std3xml7Element__T10opOpAssignVAyaa1_7eZQwMFNaNfCQByQBx4TextZv@Base 12
+ _D3std3xml7Element__T10opOpAssignVAyaa1_7eZQwMFNaNfCQByQBx5CDataZv@Base 12
+ _D3std3xml7Element__T10opOpAssignVAyaa1_7eZQwMFNaNfCQByQBx7CommentZv@Base 12
+ _D3std3xml7Element__T10opOpAssignVAyaa1_7eZQwMFNaNfCQByQBxQBwZv@Base 12
+ _D3std3xml7checkEqFNaNfKAyaZv@Base 12
+ _D3std3xml7checkPIFNaNfKAyaZv@Base 12
+ _D3std3xml7isDigitFNaNbNiNfwZb@Base 12
+ _D3std3xml7isSpaceFNaNbNiNfwZb@Base 12
+ _D3std3xml7startOfFNaNbNfAyaZQe@Base 12
+ _D3std3xml8Document5opCmpMxFNlNfMxC6ObjectZi@Base 12
+ _D3std3xml8Document6__ctorMFAyaZCQBfQBeQBd@Base 12
+ _D3std3xml8Document6__ctorMFxCQBcQBb3TagZCQBoQBnQBm@Base 12
+ _D3std3xml8Document6__initZ@Base 12
+ _D3std3xml8Document6__vtblZ@Base 12
+ _D3std3xml8Document6toHashMxFNbNlNeZm@Base 12
+ _D3std3xml8Document7__ClassZ@Base 12
+ _D3std3xml8Document8opEqualsMxFNfMxC6ObjectZb@Base 12
+ _D3std3xml8Document8toStringMxFNlNfZAya@Base 12
+ _D3std3xml8checkEndFNaNfAyaKQeZv@Base 12
+ _D3std3xml8checkTagFNaNfKAyaJQeJQhZv@Base 12
+ _D3std3xml8isLetterFNaNbNiNfwZb@Base 12
+ _D3std3xml9CharTableyAi@Base 12
+ _D3std3xml9checkETagFNaNfKAyaJQeZv@Base 12
+ _D3std3xml9checkMiscFNaNfKAyaZv@Base 12
+ _D3std3xml9checkNameFNaNfKAyaJQeZv@Base 12
+ _D3std3xml__T3optS_DQsQq10checkSpaceFNaNfKAyaZvZQBjQp@Base 12
+ _D3std3xml__T3optS_DQsQq11checkSDDeclFNaNfKAyaZvZQBkQp@Base 12
+ _D3std3xml__T3optS_DQsQq12checkXMLDeclFNaNfKAyaZvZQBlQp@Base 12
+ _D3std3xml__T3optS_DQsQq17checkEncodingDeclFNaNfKAyaZvZQBqQp@Base 12
+ _D3std3xml__T3optS_DQsQq__T3seqS_DQBgQBf16checkDocTypeDeclFNaNfKAyaZvS_DQCsQCr__T4starS_DQDjQDi9checkMiscQBvZQBcQCcZQDlQCjZQEgQCq@Base 12
+ _D3std3xml__T3seqS_DQsQq10checkSpaceFNaNfKAyaZvS_DQBwQBv14checkAttributeQBkZQClQBr@Base 12
+ _D3std3xml__T3seqS_DQsQq16checkDocTypeDeclFNaNfKAyaZvS_DQCcQCb__T4starS_DQCtQCs9checkMiscQBvZQBcQCcZQDjQCj@Base 12
+ _D3std3xml__T4starS_DQtQr9checkMiscFNaNfKAyaZvZQBiQp@Base 12
+ _D3std3xml__T4starS_DQtQr__T3seqS_DQBhQBg10checkSpaceFNaNfKAyaZvS_DQCnQCm14checkAttributeQBkZQCnQBrZQDjQBy@Base 12
+ _D3std3xml__T6encodeTAyaZQmFNaNbNfQnZQq@Base 12
+ _D3std3xml__T6quotedS_DQvQt12checkEncNameFNaNfKAyaZvZQBoQp@Base 12
+ _D3std3xml__T6quotedS_DQvQt15checkVersionNumFNaNfKAyaZvZQBrQp@Base 12
+ _D3std3xml__T6toTypeTxCQvQt3TagZQtFNaNfNkMNgC6ObjectZNgxCQCdQCcQBk@Base 12
+ _D3std3xml__T6toTypeTxCQvQt4ItemZQuFNaNfNkMNgC6ObjectZNgxCQCeQCdQBl@Base 12
+ _D3std3xml__T6toTypeTxCQvQt7ElementZQxFNaNfNkMNgC6ObjectZNgxCQChQCgQBo@Base 12
+ _D3std3xml__T6toTypeTxCQvQt8DocumentZQyFNaNfNkMNgC6ObjectZNgxCQCiQChQBp@Base 12
+ _D3std3zip10ZipArchive12deleteMemberMFNfCQBnQBm13ArchiveMemberZv@Base 12
+ _D3std3zip10ZipArchive12totalEntriesMxFNaNbNdNiNfZk@Base 12
+ _D3std3zip10ZipArchive13removeSegmentMFNaNfkkZv@Base 12
+ _D3std3zip10ZipArchive19zip64ExtractVersionxt@Base 12
+ _D3std3zip10ZipArchive24endOfCentralDirSignatureyAh@Base 12
+ _D3std3zip10ZipArchive24localFileHeaderSignatureyAh@Base 12
+ _D3std3zip10ZipArchive25archiveExtraDataSignatureyAh@Base 12
+ _D3std3zip10ZipArchive25digitalSignatureSignatureyAh@Base 12
+ _D3std3zip10ZipArchive25findEndOfCentralDirRecordMFZk@Base 12
+ _D3std3zip10ZipArchive26centralFileHeaderSignatureyAh@Base 12
+ _D3std3zip10ZipArchive29zip64EndOfCentralDirSignatureyAh@Base 12
+ _D3std3zip10ZipArchive36zip64EndOfCentralDirLocatorSignatureyAh@Base 12
+ _D3std3zip10ZipArchive4dataMFNaNbNdNiNfZAh@Base 12
+ _D3std3zip10ZipArchive5buildMFNaNfZAv@Base 12
+ _D3std3zip10ZipArchive6__ctorMFAvZCQBhQBgQBf@Base 12
+ _D3std3zip10ZipArchive6__ctorMFNaNbNiNfZCQBnQBmQBl@Base 12
+ _D3std3zip10ZipArchive6__initZ@Base 12
+ _D3std3zip10ZipArchive6__vtblZ@Base 12
+ _D3std3zip10ZipArchive6expandMFCQBeQBd13ArchiveMemberZAh@Base 12
+ _D3std3zip10ZipArchive7Segment6__initZ@Base 12
+ _D3std3zip10ZipArchive7__ClassZ@Base 12
+ _D3std3zip10ZipArchive7getUintMFNaNbNiNfkZk@Base 12
+ _D3std3zip10ZipArchive7isZip64MFNaNbNdNiNfbZv@Base 12
+ _D3std3zip10ZipArchive7isZip64MxFNaNbNdNiNfZb@Base 12
+ _D3std3zip10ZipArchive7putUintMFNaNbNiNfkkZv@Base 12
+ _D3std3zip10ZipArchive8getUlongMFNaNbNiNfkZm@Base 12
+ _D3std3zip10ZipArchive8putUlongMFNaNbNiNfkmZv@Base 12
+ _D3std3zip10ZipArchive9addMemberMFNfCQBjQBi13ArchiveMemberZv@Base 12
+ _D3std3zip10ZipArchive9directoryMFNaNbNdNiNfZHAyaCQBwQBv13ArchiveMember@Base 12
+ _D3std3zip10ZipArchive9getUshortMFNaNbNiNfkZt@Base 12
+ _D3std3zip10ZipArchive9putUshortMFNaNbNiNfktZv@Base 12
+ _D3std3zip11__moduleRefZ@Base 12
+ _D3std3zip12ZipException6__initZ@Base 12
+ _D3std3zip12ZipException6__vtblZ@Base 12
+ _D3std3zip12ZipException7__ClassZ@Base 12
+ _D3std3zip12ZipException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQCwQCvQCu@Base 12
+ _D3std3zip12ZipException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCwQCvQCu@Base 12
+ _D3std3zip12__ModuleInfoZ@Base 12
+ _D3std3zip13ArchiveMember12expandedDataMFNaNbNdNiNfZAh@Base 12
+ _D3std3zip13ArchiveMember12expandedDataMFNdNfAhZv@Base 12
+ _D3std3zip13ArchiveMember12expandedSizeMxFNaNbNdNiNfZk@Base 12
+ _D3std3zip13ArchiveMember14compressedDataMFNaNbNdNiNfZAh@Base 12
+ _D3std3zip13ArchiveMember14compressedSizeMxFNaNbNdNiNfZk@Base 12
+ _D3std3zip13ArchiveMember14extractVersionMxFNaNbNdNiNfZt@Base 12
+ _D3std3zip13ArchiveMember14fileAttributesMFNdNfkZv@Base 12
+ _D3std3zip13ArchiveMember14fileAttributesMxFNbNdNiZk@Base 12
+ _D3std3zip13ArchiveMember17compressionMethodMFNaNdNfEQBzQBy17CompressionMethodZv@Base 12
+ _D3std3zip13ArchiveMember17compressionMethodMxFNaNbNdNiNfZEQCfQCe17CompressionMethod@Base 12
+ _D3std3zip13ArchiveMember4timeMFNaNbNdNiNfkZv@Base 12
+ _D3std3zip13ArchiveMember4timeMFNdSQBh8datetime7systime7SysTimeZv@Base 12
+ _D3std3zip13ArchiveMember4timeMxFNaNbNdNiNfZk@Base 12
+ _D3std3zip13ArchiveMember5crc32MxFNaNbNdNiNfZk@Base 12
+ _D3std3zip13ArchiveMember5indexMFNaNbNdNiNfkZk@Base 12
+ _D3std3zip13ArchiveMember5indexMxFNaNbNdNiNfZk@Base 12
+ _D3std3zip13ArchiveMember6__initZ@Base 12
+ _D3std3zip13ArchiveMember6__vtblZ@Base 12
+ _D3std3zip13ArchiveMember7__ClassZ@Base 12
+ _D3std4conv10parseErrorFNaNfLAyaQdmZCQBjQBi13ConvException@Base 12
+ _D3std4conv11__moduleRefZ@Base 12
+ _D3std4conv11hexToStringFNaNbNfAyaZQe@Base 12
+ _D3std4conv11hexToStringFNaNbNfAyuZQe@Base 12
+ _D3std4conv11hexToStringFNaNbNfAywZQe@Base 12
+ _D3std4conv12__ModuleInfoZ@Base 12
+ _D3std4conv13ConvException6__initZ@Base 12
+ _D3std4conv13ConvException6__vtblZ@Base 12
+ _D3std4conv13ConvException7__ClassZ@Base 12
+ _D3std4conv13ConvException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQCyQCxQCv@Base 12
+ _D3std4conv13ConvException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCyQCxQCv@Base 12
+ _D3std4conv14isOctalLiteralFNaNbNiNfxAyaZb@Base 12
+ _D3std4conv20strippedOctalLiteralFAyaZQe@Base 12
+ _D3std4conv21ConvOverflowException6__ctorMFNaNbNfAyaQdmZCQCdQCcQCa@Base 12
+ _D3std4conv21ConvOverflowException6__initZ@Base 12
+ _D3std4conv21ConvOverflowException6__vtblZ@Base 12
+ _D3std4conv21ConvOverflowException7__ClassZ@Base 12
+ _D3std4conv__T13hexStrLiteralTAyaZQuFNaNbNeMQoZAa@Base 12
+ _D3std4conv__T13hexStrLiteralTAyuZQuFNaNbNeMQoZAu@Base 12
+ _D3std4conv__T13hexStrLiteralTAywZQuFNaNbNeMQoZAw@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTAaZQrFNaNbNfQmZQz@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTAxaZQsFNaNbNfQnZQBa@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTEQBb12experimental6logger4core8LogLevelZQCcFNaNfQBwZQCk@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTEQBb5regex8internal2ir2IRZQBoFNaNfQBiZQBw@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTEQBb6socket12SocketOptionZQBoFNaNfQBiZQBw@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTPSQBc11parallelism12AbstractTaskZQBvFNaNfQBpZQCd@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTPaZQrFNaNbQkZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTPxaZQsFNaNbQlZQy@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTPxhZQsFNaNfQlZQy@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTQkZQrFNaNbNiNfQyZQBb@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTSQBb11concurrency3TidZQBkFNaNfQBeZQBs@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTSQBb4path__T16asNormalizedPathTSQCg5range__T5chainTSQDa3utf__T10byCodeUnitTQDhZQrFQDoZ14ByCodeUnitImplTSQFaQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtZQHdFNaNfQGxZQHl@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTSQBb5range__T5chainTSQBv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQDuQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFjFNaNfQFdZQFr@Base 12
+ _D3std4conv__T2toTAyaZ__TQlThZQqFNaNbNfhZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTiZQqFNaNbNfiZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTkZQqFNaNbNfkZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTmZQqFNaNbNfmZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTtZQqFNaNbNftZQx@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTxkZQrFNaNbNfxkZQz@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTxlZQrFNaNbNfxlZQz@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTxmZQrFNaNbNfxmZQz@Base 12
+ _D3std4conv__T2toTAyaZ__TQlTykZQrFNaNbNfykZQz@Base 12
+ _D3std4conv__T2toThZ__TQjTxkZQpFNaNfxkZh@Base 12
+ _D3std4conv__T2toTiZ__TQjTEQz3net7isemail15EmailStatusCodeZQBtFNaNbNiNfQBtZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTEQz8datetime4date5MonthZQBkFNaNbNiNfQBkZi@Base 12
+ _D3std4conv__T2toTiZ__TQjThZQoFNaNbNiNfhZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTiZQoFNaNbNiNfiZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTkZQoFNaNfkZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTmZQoFNaNfmZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTsZQoFNaNbNiNfsZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxEQBa8datetime4date5MonthZQBmFNaNbNiNfxQBmZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxhZQpFNaNbNiNfxhZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxkZQpFNaNfxkZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxlZQpFNaNfxlZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxmZQpFNaNfxmZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxsZQpFNaNbNiNfxsZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTxtZQpFNaNbNiNfxtZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTykZQpFNaNfykZi@Base 12
+ _D3std4conv__T2toTiZ__TQjTymZQpFNaNfymZi@Base 12
+ _D3std4conv__T2toTkZ__TQjTkZQoFNaNbNiNfkZk@Base 12
+ _D3std4conv__T2toTkZ__TQjTmZQoFNaNfmZk@Base 12
+ _D3std4conv__T2toTlZ__TQjTlZQoFNaNbNiNflZl@Base 12
+ _D3std4conv__T2toTlZ__TQjTmZQoFNaNfmZl@Base 12
+ _D3std4conv__T2toTmZ__TQjTkZQoFNaNbNiNfkZm@Base 12
+ _D3std4conv__T2toTmZ__TQjTmZQoFNaNbNiNfmZm@Base 12
+ _D3std4conv__T2toTtZ__TQjTAxaZQqFNaNfQlZt@Base 12
+ _D3std4conv__T2toTwZ__TQjTwZQoFNaNbNiNfwZw@Base 12
+ _D3std4conv__T4textTAxaTAyaTQiZQrFNaNbNfQuQsQyZQx@Base 12
+ _D3std4conv__T4textTAyaTAxaTQiZQrFNaNbNfQuQsQyZQBb@Base 12
+ _D3std4conv__T4textTAyaTAxaZQoFNaNbNfQrQpZQw@Base 12
+ _D3std4conv__T4textTAyaTQeTQhTAxaTQoZQxFNaNbNfQBaQBdQBgQzQBlZQBp@Base 12
+ _D3std4conv__T4textTAyaTQeTQhTQkTkZQvFNaNbNfQyQBaQBdQBgkZQBl@Base 12
+ _D3std4conv__T4textTAyaTQeTQhTQkZQtFNaNbNfQwQyQBaQBdZQBh@Base 12
+ _D3std4conv__T4textTAyaTQeTQhZQqFNaNbNfQtQvQxZQBa@Base 12
+ _D3std4conv__T4textTAyaTQeTiTQjTiTQoZQxFNaNbNfQBaQBdiQBhiQBlZQBp@Base 12
+ _D3std4conv__T4textTAyaTQeZQnFNaNbNfQqQsZQv@Base 12
+ _D3std4conv__T4textTAyaTaZQmFNaNbNfQpaZQt@Base 12
+ _D3std4conv__T4textTAyaThTaTaTQkTmZQvFNaNbNfQyhaaQBdmZQBi@Base 12
+ _D3std4conv__T4textTAyaTiTQgZQpFNaNbNfQsiQvZQy@Base 12
+ _D3std4conv__T4textTAyaTiZQmFNaNbNfQpiZQt@Base 12
+ _D3std4conv__T4textTAyaTkTQgTkZQrFNaNbNfQukQxkZQBb@Base 12
+ _D3std4conv__T4textTAyaTkTQgTmTQlZQuFNaNbNfQxkQBamQBeZQBi@Base 12
+ _D3std4conv__T4textTAyaTmTQgZQpFNaNbNfQsmQvZQy@Base 12
+ _D3std4conv__T4textTAyaTwTQgZQpFNaNfQqwQtZQw@Base 12
+ _D3std4conv__T4textTAyaTxaTQhZQqFNaNbNfQtxaQxZQBa@Base 12
+ _D3std4conv__T4textTAyaTxaZQnFNaNbNfQqxaZQv@Base 12
+ _D3std4conv__T4textTAyaZQkFNaNbNiNfQpZQs@Base 12
+ _D3std4conv__T4textTEQt5regex8internal2ir2IRZQBfFNaNfQBhZAya@Base 12
+ _D3std4conv__T4textTPSQu11parallelism12AbstractTaskTaTQBiZQBsFNaNfQBuaQByZAya@Base 12
+ _D3std4conv__T4textTPxhTAyaTQiZQrFNaNfQsQqQwZQv@Base 12
+ _D3std4conv__T5octalTiZQjFNaNbNfxAyaZi@Base 12
+ _D3std4conv__T5parseThTAxaVEQBa8typecons__T4FlagVAyaa7_646f436f756e74ZQBbi0ZQCkFNaNfMKQClZh@Base 12
+ _D3std4conv__T5parseTiTAxaVEQBa8typecons__T4FlagVAyaa7_646f436f756e74ZQBbi0ZQCkFNaNfMKQClZi@Base 12
+ _D3std4conv__T5parseTkTAxaVEQBa8typecons__T4FlagVAyaa7_646f436f756e74ZQBbi0ZQCkFNaNfMKQClZk@Base 12
+ _D3std4conv__T5parseTkTAxaVEQBa8typecons__T4FlagVAyaa7_646f436f756e74ZQBbi1ZQCkFNaNfMKQClZSQDlQCl__T5TupleTkVQCia4_64617461TmVQCza5_636f756e74ZQBr@Base 12
+ _D3std4conv__T5parseTtTAxaVEQBa8typecons__T4FlagVAyaa7_646f436f756e74ZQBbi0ZQCkFNaNfMKQClZt@Base 12
+ _D3std4conv__T5toStrTAyaTPSQz11parallelism12AbstractTaskZQBrFNaNfQBoZQBw@Base 12
+ _D3std4conv__T5toStrTAyaTPxhZQpFNaNfQlZQs@Base 12
+ _D3std4conv__T5toStrTAyaTSQy11concurrency3TidZQBgFNaNfQBdZQBl@Base 12
+ _D3std4conv__T5toStrTAyaTSQy4path__T16asNormalizedPathTSQCc5range__T5chainTSQCw3utf__T10byCodeUnitTQDaZQrFQDhZ14ByCodeUnitImplTSQEwQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtZQGzFNaNfQGwZQHe@Base 12
+ _D3std4conv__T5toStrTAyaTSQy5range__T5chainTSQBr3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQDqQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFfFNaNfQFcZQFk@Base 12
+ _D3std4conv__T5toStrTyAaTEQy12experimental6logger4core8LogLevelZQByFNaNfQBvZyQCd@Base 12
+ _D3std4conv__T5toStrTyAaTEQy5regex8internal2ir2IRZQBkFNaNfQBhZyQBp@Base 12
+ _D3std4conv__T5toStrTyAaTEQy6socket12SocketOptionZQBkFNaNfQBhZyQBp@Base 12
+ _D3std4conv__T6toImplTAyaTAaZQpFNaNbNfQmZQt@Base 12
+ _D3std4conv__T6toImplTAyaTAxaZQqFNaNbNfQnZQu@Base 12
+ _D3std4conv__T6toImplTAyaTEQz12experimental6logger4core8LogLevelZQBzFNaNfQBvZQCd@Base 12
+ _D3std4conv__T6toImplTAyaTEQz5regex8internal2ir2IRZQBlFNaNfQBhZQBp@Base 12
+ _D3std4conv__T6toImplTAyaTEQz6socket12SocketOptionZQBlFNaNfQBhZQBp@Base 12
+ _D3std4conv__T6toImplTAyaTPSQBa11parallelism12AbstractTaskZQBtFNaNfQBpZQBx@Base 12
+ _D3std4conv__T6toImplTAyaTPaZQpFNaNbQkZQr@Base 12
+ _D3std4conv__T6toImplTAyaTPxaZQqFNaNbQlZQs@Base 12
+ _D3std4conv__T6toImplTAyaTPxhZQqFNaNfQlZQs@Base 12
+ _D3std4conv__T6toImplTAyaTQeZQpFNaNbNiNfQsZQv@Base 12
+ _D3std4conv__T6toImplTAyaTSQz11concurrency3TidZQBhFNaNfQBdZQBl@Base 12
+ _D3std4conv__T6toImplTAyaTSQz4path__T16asNormalizedPathTSQCd5range__T5chainTSQCx3utf__T10byCodeUnitTQDaZQrFQDhZ14ByCodeUnitImplTSQExQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtZQHaFNaNfQGwZQHe@Base 12
+ _D3std4conv__T6toImplTAyaTSQz5range__T5chainTSQBs3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQDrQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFgFNaNfQFcZQFk@Base 12
+ _D3std4conv__T6toImplTAyaThZQoFNaNbNehkEQBm5ascii10LetterCaseZQBo@Base 12
+ _D3std4conv__T6toImplTAyaThZQoFNaNbNfhZQr@Base 12
+ _D3std4conv__T6toImplTAyaTiZQoFNaNbNeikEQBm5ascii10LetterCaseZQBo@Base 12
+ _D3std4conv__T6toImplTAyaTiZQoFNaNbNfiZQr@Base 12
+ _D3std4conv__T6toImplTAyaTkZQoFNaNbNekkEQBm5ascii10LetterCaseZQBo@Base 12
+ _D3std4conv__T6toImplTAyaTkZQoFNaNbNfkZQr@Base 12
+ _D3std4conv__T6toImplTAyaTmZQoFNaNbNemkEQBm5ascii10LetterCaseZQBo@Base 12
+ _D3std4conv__T6toImplTAyaTmZQoFNaNbNfmZQr@Base 12
+ _D3std4conv__T6toImplTAyaTtZQoFNaNbNetkEQBm5ascii10LetterCaseZQBo@Base 12
+ _D3std4conv__T6toImplTAyaTtZQoFNaNbNftZQr@Base 12
+ _D3std4conv__T6toImplTAyaTxkZQpFNaNbNexkkEQBo5ascii10LetterCaseZQBq@Base 12
+ _D3std4conv__T6toImplTAyaTxkZQpFNaNbNfxkZQt@Base 12
+ _D3std4conv__T6toImplTAyaTxlZQpFNaNbNexlkEQBo5ascii10LetterCaseZQBq@Base 12
+ _D3std4conv__T6toImplTAyaTxlZQpFNaNbNfxlZQt@Base 12
+ _D3std4conv__T6toImplTAyaTxmZQpFNaNbNexmkEQBo5ascii10LetterCaseZQBq@Base 12
+ _D3std4conv__T6toImplTAyaTxmZQpFNaNbNfxmZQt@Base 12
+ _D3std4conv__T6toImplTAyaTykZQpFNaNbNeykkEQBo5ascii10LetterCaseZQBq@Base 12
+ _D3std4conv__T6toImplTAyaTykZQpFNaNbNfykZQt@Base 12
+ _D3std4conv__T6toImplThTxkZQnFNaNfxkZh@Base 12
+ _D3std4conv__T6toImplTiTEQx3net7isemail15EmailStatusCodeZQBrFNaNbNiNfQBtZi@Base 12
+ _D3std4conv__T6toImplTiTEQx8datetime4date5MonthZQBiFNaNbNiNfQBkZi@Base 12
+ _D3std4conv__T6toImplTiThZQmFNaNbNiNfhZi@Base 12
+ _D3std4conv__T6toImplTiTiZQmFNaNbNiNfiZi@Base 12
+ _D3std4conv__T6toImplTiTkZQmFNaNfkZi@Base 12
+ _D3std4conv__T6toImplTiTmZQmFNaNfmZi@Base 12
+ _D3std4conv__T6toImplTiTsZQmFNaNbNiNfsZi@Base 12
+ _D3std4conv__T6toImplTiTxEQy8datetime4date5MonthZQBjFNaNbNiNfxQBlZi@Base 12
+ _D3std4conv__T6toImplTiTxhZQnFNaNbNiNfxhZi@Base 12
+ _D3std4conv__T6toImplTiTxkZQnFNaNfxkZi@Base 12
+ _D3std4conv__T6toImplTiTxlZQnFNaNfxlZi@Base 12
+ _D3std4conv__T6toImplTiTxmZQnFNaNfxmZi@Base 12
+ _D3std4conv__T6toImplTiTxsZQnFNaNbNiNfxsZi@Base 12
+ _D3std4conv__T6toImplTiTxtZQnFNaNbNiNfxtZi@Base 12
+ _D3std4conv__T6toImplTiTykZQnFNaNfykZi@Base 12
+ _D3std4conv__T6toImplTiTymZQnFNaNfymZi@Base 12
+ _D3std4conv__T6toImplTkTkZQmFNaNbNiNfkZk@Base 12
+ _D3std4conv__T6toImplTkTmZQmFNaNfmZk@Base 12
+ _D3std4conv__T6toImplTlTlZQmFNaNbNiNflZl@Base 12
+ _D3std4conv__T6toImplTlTmZQmFNaNfmZl@Base 12
+ _D3std4conv__T6toImplTmTkZQmFNaNbNiNfkZm@Base 12
+ _D3std4conv__T6toImplTmTmZQmFNaNbNiNfmZm@Base 12
+ _D3std4conv__T6toImplTtTAxaZQoFNaNfQlZt@Base 12
+ _D3std4conv__T6toImplTwTwZQmFNaNbNiNfwZw@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi128ZQCjyQCd@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi160ZQCjyQCd@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi192ZQCjyQCd@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi1ZQChyQCb@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi255ZQCjyQCd@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi32ZQCiyQCc@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi64ZQCiyQCc@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa12experimental6logger4core8LogLevelVQBoi96ZQCiyQCc@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai128ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai129ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai130ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai132ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai133ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai134ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai136ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai137ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai138ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai140ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai141ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai142ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai144ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai145ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai146ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai148ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai149ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai150ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai152ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai153ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai154ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai156ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai157ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai158ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai160ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai161ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai162ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai164ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai165ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai166ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai168ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai172ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai176ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai180ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai184ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai188ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai192ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa5regex8internal2ir2IRVQBai196ZQBvyQBp@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai10ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai13ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai16ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai17ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai18ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai19ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai1ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai20ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai21ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai26ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai2ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai30ZQBuyQBo@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai3ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai4ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai5ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai6ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai7ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai8ZQBtyQBn@Base 12
+ _D3std4conv__T7enumRepTyAaTEQBa6socket12SocketOptionVQBai9ZQBtyQBn@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result10initializeMFNaNbNiNfiZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii10TaVQDji1TiZQEhFNaNbNiNfiZQCq@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii10TaVQDmi1TiZQEkFNaNbNiNfiZQCt@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZSQCsQCr__TQCpVii10TaVQCki1TiZQDiFNaNbNiNfiZ6Result@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result10initializeMFNaNbNiNfkZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii10TaVQDji1TkZQEhFNaNbNiNfkZQCq@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii10TaVQDmi1TkZQEkFNaNbNiNfkZQCt@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZSQCsQCr__TQCpVii10TaVQCki1TkZQDiFNaNbNiNfkZ6Result@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result10initializeMFNaNbNiNflZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii10TaVQDji1TlZQEhFNaNbNiNflZQCq@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii10TaVQDmi1TlZQEkFNaNbNiNflZQCt@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZSQCsQCr__TQCpVii10TaVQCki1TlZQDiFNaNbNiNflZ6Result@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result10initializeMFNaNbNiNfmZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii10TaVQDji1TmZQEhFNaNbNiNfmZQCq@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii10TaVQDmi1TmZQEkFNaNbNiNfmZQCt@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZSQCsQCr__TQCpVii10TaVQCki1TmZQDiFNaNbNiNfmZ6Result@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii16TaVQDji0TkZQEhFNaNbNiNfkZQCq@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result6__ctorMFNaNbNcNiNfkZSQDuQDt__TQDrVii16TaVQDmi0TkZQEkFNaNbNiNfkZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii16TaVQDmi0TkZQEkFNaNbNiNfkZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZSQCsQCr__TQCpVii16TaVQCki0TkZQDiFNaNbNiNfkZ6Result@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii16TaVQDji0TmZQEhFNaNbNiNfmZQCq@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result6__ctorMFNaNbNcNiNfmZSQDuQDt__TQDrVii16TaVQDmi0TmZQEkFNaNbNiNfmZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii16TaVQDmi0TmZQEkFNaNbNiNfmZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZSQCsQCr__TQCpVii16TaVQCki0TmZQDiFNaNbNiNfmZ6Result@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii16TaVQDji1TkZQEhFNaNbNiNfkZQCq@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6__ctorMFNaNbNcNiNfkZSQDuQDt__TQDrVii16TaVQDmi1TkZQEkFNaNbNiNfkZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii16TaVQDmi1TkZQEkFNaNbNiNfkZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZSQCsQCr__TQCpVii16TaVQCki1TkZQDiFNaNbNiNfkZ6Result@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result4saveMFNaNbNdNiNfZSQDrQDq__TQDoVii16TaVQDji1TmZQEhFNaNbNiNfmZQCq@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6__ctorMFNaNbNcNiNfmZSQDuQDt__TQDrVii16TaVQDmi1TmZQEkFNaNbNiNfmZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7opSliceMFNaNbNiNfmmZSQDuQDt__TQDrVii16TaVQDmi1TmZQEkFNaNbNiNfmZQCt@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZSQCsQCr__TQCpVii16TaVQCki1TmZQDiFNaNbNiNfmZ6Result@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result4saveMFNaNbNdNiNfZSQDqQDp__TQDnVii2TaVQDii1TkZQEfFNaNbNiNfkZQCp@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__ctorMFNaNbNcNiNfkZSQDtQDs__TQDqVii2TaVQDli1TkZQEiFNaNbNiNfkZQCs@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7opSliceMFNaNbNiNfmmZSQDtQDs__TQDqVii2TaVQDli1TkZQEiFNaNbNiNfkZQCs@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZSQCrQCq__TQCoVii2TaVQCji1TkZQDgFNaNbNiNfkZ6Result@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result4saveMFNaNbNdNiNfZSQDqQDp__TQDnVii2TaVQDii1TmZQEfFNaNbNiNfmZQCp@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__ctorMFNaNbNcNiNfmZSQDtQDs__TQDqVii2TaVQDli1TmZQEiFNaNbNiNfmZQCs@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7opSliceMFNaNbNiNfmmZSQDtQDs__TQDqVii2TaVQDli1TmZQEiFNaNbNiNfmZQCs@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZSQCrQCq__TQCoVii2TaVQCji1TmZQDgFNaNbNiNfmZ6Result@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result4saveMFNaNbNdNiNfZSQDqQDp__TQDnVii8TaVQDii1TkZQEfFNaNbNiNfkZQCp@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__ctorMFNaNbNcNiNfkZSQDtQDs__TQDqVii8TaVQDli1TkZQEiFNaNbNiNfkZQCs@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7opSliceMFNaNbNiNfmmZSQDtQDs__TQDqVii8TaVQDli1TkZQEiFNaNbNiNfkZQCs@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZSQCrQCq__TQCoVii8TaVQCji1TkZQDgFNaNbNiNfkZ6Result@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result4backMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result4saveMFNaNbNdNiNfZSQDqQDp__TQDnVii8TaVQDii1TmZQEfFNaNbNiNfmZQCp@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__ctorMFNaNbNcNiNfmZSQDtQDs__TQDqVii8TaVQDli1TmZQEiFNaNbNiNfmZQCs@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7opSliceMFNaNbNiNfmmZSQDtQDs__TQDqVii8TaVQDli1TmZQEiFNaNbNiNfmZQCs@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZSQCrQCq__TQCoVii8TaVQCji1TmZQDgFNaNbNiNfmZ6Result@Base 12
+ _D3std4conv__T8textImplTAyaTAxaTQiTQhZQyFNaNbNfQtQzQxZQBe@Base 12
+ _D3std4conv__T8textImplTAyaTEQBb5regex8internal2ir2IRZQBoFNaNfQBiZQBq@Base 12
+ _D3std4conv__T8textImplTAyaTPSQBc11parallelism12AbstractTaskTaTQBjZQCbFNaNfQBvaQBzZQCh@Base 12
+ _D3std4conv__T8textImplTAyaTPxhTQiTQhZQyFNaNfQrQxQvZQBc@Base 12
+ _D3std4conv__T8textImplTAyaTQeTAxaTQlZQyFNaNbNfQxQsQBbZQBf@Base 12
+ _D3std4conv__T8textImplTAyaTQeTAxaZQvFNaNbNfQuQpZQz@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhTQkTAxaTQrZQBeFNaNbNfQBeQBhQBkQBaQBqZQBu@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhTQkTQnTkZQBcFNaNbNfQBcQBfQBiQBlkZQBq@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhTQkTQnZQBaFNaNbNfQBaQBdQBgQBjZQBn@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhTQkZQxFNaNbNfQwQyQBaZQBe@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhTiTQmTiTQrZQBeFNaNbNfQBeQBhiQBliQBpZQBt@Base 12
+ _D3std4conv__T8textImplTAyaTQeTQhZQuFNaNbNfQtQvZQy@Base 12
+ _D3std4conv__T8textImplTAyaTQeTaZQtFNaNbNfQsaZQw@Base 12
+ _D3std4conv__T8textImplTAyaTQeThTaTaTQnTmZQBcFNaNbNfQBchaaQBimZQBn@Base 12
+ _D3std4conv__T8textImplTAyaTQeTiTQjZQwFNaNbNfQviQyZQBb@Base 12
+ _D3std4conv__T8textImplTAyaTQeTiZQtFNaNbNfQsiZQw@Base 12
+ _D3std4conv__T8textImplTAyaTQeTkTQjTkZQyFNaNbNfQxkQBakZQBf@Base 12
+ _D3std4conv__T8textImplTAyaTQeTkTQjTmTQoZQBbFNaNbNfQBbkQBfmQBjZQBn@Base 12
+ _D3std4conv__T8textImplTAyaTQeTmTQjZQwFNaNbNfQvmQyZQBb@Base 12
+ _D3std4conv__T8textImplTAyaTQeTwTQjZQwFNaNfQtwQwZQz@Base 12
+ _D3std4conv__T8textImplTAyaTQeTxaTQkZQxFNaNbNfQwxaQBaZQBe@Base 12
+ _D3std4conv__T8textImplTAyaTQeTxaZQuFNaNbNfQtxaZQy@Base 12
+ _D3std4conv__T8textImplTAyaTQeZQrFNaNbNiNfQsZQv@Base 12
+ _D3std4conv__T8textImplTAyaTiZQqFNaNbNfiZQr@Base 12
+ _D3std4conv__T8textImplTAyaTkZQqFNaNbNfkZQr@Base 12
+ _D3std4conv__T8textImplTAyaTmZQqFNaNbNfmZQr@Base 12
+ _D3std4conv__T8unsignedThZQmFNaNbNiNfhZh@Base 12
+ _D3std4conv__T8unsignedTiZQmFNaNbNiNfiZk@Base 12
+ _D3std4conv__T8unsignedTkZQmFNaNbNiNfkZk@Base 12
+ _D3std4conv__T8unsignedTlZQmFNaNbNiNflZm@Base 12
+ _D3std4conv__T8unsignedTmZQmFNaNbNiNfmZm@Base 12
+ _D3std4conv__T8unsignedTtZQmFNaNbNiNftZt@Base 12
+ _D3std4conv__T8unsignedTxkZQnFNaNbNiNfxkZk@Base 12
+ _D3std4conv__T8unsignedTxlZQnFNaNbNiNfxlZm@Base 12
+ _D3std4conv__T8unsignedTxmZQnFNaNbNiNfxmZm@Base 12
+ _D3std4conv__T8unsignedTykZQnFNaNbNiNfykZk@Base 12
+ _D3std4conv__T9convErrorTAxaTiZQrFNaNfQnAyamZCQBsQBr13ConvException@Base 12
+ _D3std4conv__T9convErrorTAxaTkZQrFNaNfQnAyamZCQBsQBr13ConvException@Base 12
+ _D3std4conv__T9convErrorTAxaTtZQrFNaNfQnAyamZCQBsQBr13ConvException@Base 12
+ _D3std4file10attrIsFileFNaNbNiNfkZb@Base 12
+ _D3std4file10dirEntriesFAyaEQBaQz8SpanModebZSQBrQBq11DirIterator@Base 12
+ _D3std4file10dirEntriesFAyaQdEQBcQBb8SpanModebZ1fMFNaNbNfSQCeQCd8DirEntryZb@Base 12
+ _D3std4file10dirEntriesFAyaQdEQBcQBb8SpanModebZSQBu9algorithm9iteration__T12FilterResultS_DQDlQDkQDiFQCzQDcQDabZ1fMFNaNbNfSQErQEq8DirEntryZbTSQFkQFj11DirIteratorZQDk@Base 12
+ _D3std4file10existsImplFNbNiNeMPxaZb@Base 12
+ _D3std4file10removeImplFNeMAxaMPxaZv@Base 12
+ _D3std4file10renameImplFNeMAxaMQeMPxaMQeZv@Base 12
+ _D3std4file11DirIterator11__fieldDtorMFZv@Base 12
+ _D3std4file11DirIterator15__fieldPostblitMFNaNbNiNlZv@Base 12
+ _D3std4file11DirIterator5emptyMFNdNfZb@Base 12
+ _D3std4file11DirIterator5frontMFNdNfZSQBkQBj8DirEntry@Base 12
+ _D3std4file11DirIterator6__ctorMFNcNeAyaEQBnQBm8SpanModebZSQCfQCeQCc@Base 12
+ _D3std4file11DirIterator6__initZ@Base 12
+ _D3std4file11DirIterator8opAssignMFNcNjSQBmQBlQBjZQl@Base 12
+ _D3std4file11DirIterator8popFrontMFNfZv@Base 12
+ _D3std4file11__moduleRefZ@Base 12
+ _D3std4file11thisExePathFNeZAya@Base 12
+ _D3std4file12__ModuleInfoZ@Base 12
+ _D3std4file12mkdirRecurseFNfMAxaZv@Base 12
+ _D3std4file12rmdirRecurseFNfKSQBcQBb8DirEntryZv@Base 12
+ _D3std4file12rmdirRecurseFNfMAxaZv@Base 12
+ _D3std4file12rmdirRecurseFNfSQBbQBa8DirEntryZv@Base 12
+ _D3std4file12setTimesImplFNeMAxaMPxaSQBj8datetime7systime7SysTimeQBdZv@Base 12
+ _D3std4file13FileException6__ctorMFNaNfMAxaMQeAyamZCQByQBxQBv@Base 12
+ _D3std4file13FileException6__ctorMFNaNfMAxaMQeAyamkZCQBzQByQBw@Base 12
+ _D3std4file13FileException6__ctorMFNeMAxakAyamZCQBuQBtQBr@Base 12
+ _D3std4file13FileException6__initZ@Base 12
+ _D3std4file13FileException6__vtblZ@Base 12
+ _D3std4file13FileException7__ClassZ@Base 12
+ _D3std4file13attrIsSymlinkFNaNbNiNfkZb@Base 12
+ _D3std4file15DirIteratorImpl11__xopEqualsMxFKxSQBtQBsQBqZb@Base 12
+ _D3std4file15DirIteratorImpl11popDirStackMFNeZv@Base 12
+ _D3std4file15DirIteratorImpl15releaseDirStackMFNeZv@Base 12
+ _D3std4file15DirIteratorImpl4nextMFNeZb@Base 12
+ _D3std4file15DirIteratorImpl5emptyMFNdNfZb@Base 12
+ _D3std4file15DirIteratorImpl5frontMFNdNfZSQBoQBn8DirEntry@Base 12
+ _D3std4file15DirIteratorImpl6__dtorMFNfZv@Base 12
+ _D3std4file15DirIteratorImpl6__initZ@Base 12
+ _D3std4file15DirIteratorImpl6stepInMFNfAyaZ14trustedOpendirFNbNiNeQBbZPS4core3sys5posix6dirent3DIR@Base 12
+ _D3std4file15DirIteratorImpl6stepInMFNfAyaZb@Base 12
+ _D3std4file15DirIteratorImpl8hasExtraMFNfZb@Base 12
+ _D3std4file15DirIteratorImpl8opAssignMFNcNjNeSQBsQBrQBpZQl@Base 12
+ _D3std4file15DirIteratorImpl8popExtraMFNfZSQBpQBo8DirEntry@Base 12
+ _D3std4file15DirIteratorImpl8popFrontMFNfZv@Base 12
+ _D3std4file15DirIteratorImpl9DirHandle11__xopEqualsMxFKxSQCdQCcQCaQBmZb@Base 12
+ _D3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D3std4file15DirIteratorImpl9DirHandle9__xtoHashFNbNeKxSQCcQCbQBzQBlZm@Base 12
+ _D3std4file15DirIteratorImpl9__xtoHashFNbNeKxSQBsQBrQBpZm@Base 12
+ _D3std4file15DirIteratorImpl9mayStepInMFNfZb@Base 12
+ _D3std4file15DirIteratorImpl9pushExtraMFNfSQBpQBo8DirEntryZv@Base 12
+ _D3std4file15DirIteratorImpl__T6__ctorTAyaZQmMFNcNfQmEQCaQBz8SpanModebZSQCsQCrQCp@Base 12
+ _D3std4file21getAvailableDiskSpaceFNfMAxaZm@Base 12
+ _D3std4file6getcwdFNeZAya@Base 12
+ _D3std4file7tempDirFNeZ12addSeparatorFNaNbNfAyaZQe@Base 12
+ _D3std4file7tempDirFNeZ5cacheAya@Base 12
+ _D3std4file7tempDirFNeZAya@Base 12
+ _D3std4file7tempDirFNeZ__T15findExistingDirTAyaTQeTQhTQkTQnTQqZQBlFNfLQBaLQBeLQBiLQBmLQBqLQBuZQBy@Base 12
+ _D3std4file8DirEntry10attributesMFNdNlNfZk@Base 12
+ _D3std4file8DirEntry11__xopEqualsMxFKxSQBlQBkQBiZb@Base 12
+ _D3std4file8DirEntry14linkAttributesMFNdNlNfZk@Base 12
+ _D3std4file8DirEntry15_ensureStatDoneMFNlNeZv@Base 12
+ _D3std4file8DirEntry16_ensureLStatDoneMFNlNeZv@Base 12
+ _D3std4file8DirEntry16timeLastAccessedMFNdNlNfZSQBu8datetime7systime7SysTime@Base 12
+ _D3std4file8DirEntry16timeLastModifiedMFNdNlNfZSQBu8datetime7systime7SysTime@Base 12
+ _D3std4file8DirEntry17timeStatusChangedMFNdNlNfZSQBv8datetime7systime7SysTime@Base 12
+ _D3std4file8DirEntry22_ensureStatOrLStatDoneMFNlNeZv@Base 12
+ _D3std4file8DirEntry4nameMxFNaNbNdNjNfZAya@Base 12
+ _D3std4file8DirEntry4sizeMFNdNlNfZm@Base 12
+ _D3std4file8DirEntry5isDirMFNdNlNfZb@Base 12
+ _D3std4file8DirEntry6__ctorMFNcNfAyaPS4core3sys5posix6direntQhZSQCkQCjQCh@Base 12
+ _D3std4file8DirEntry6__ctorMFNcNfAyaZSQBkQBjQBh@Base 12
+ _D3std4file8DirEntry6__initZ@Base 12
+ _D3std4file8DirEntry6isFileMFNdNlNfZb@Base 12
+ _D3std4file8DirEntry7statBufMFNdNlNfZS4core3sys5posixQk4stat6stat_t@Base 12
+ _D3std4file8DirEntry9__xtoHashFNbNeKxSQBkQBjQBhZm@Base 12
+ _D3std4file8DirEntry9isSymlinkMFNdNlNfZb@Base 12
+ _D3std4file8copyImplFNeMAxaMQeMPxaMQeEQBk8typecons__T4FlagVAyaa18_707265736572766541747472696275746573ZQByZv@Base 12
+ _D3std4file8deletemeFNdNfZ8fileNameAya@Base 12
+ _D3std4file8deletemeFNdNfZAya@Base 12
+ _D3std4file8readImplFNeMAxaMPxamZAv@Base 12
+ _D3std4file9attrIsDirFNaNbNiNfkZb@Base 12
+ _D3std4file9writeImplFNeMAxaMPxaMAxvbZv@Base 12
+ _D3std4file__T13getAttributesTAxaZQuFNfQjZk@Base 12
+ _D3std4file__T13getAttributesTAyaZQuFNfQjZk@Base 12
+ _D3std4file__T15ensureDirExistsZQsFNfMAxaZb@Base 12
+ _D3std4file__T17statTimeToStdTimeVai109ZQBaFNaNbNfKxS4core3sys5posixQk4stat6stat_tZSQDe8datetime7systime7SysTime@Base 12
+ _D3std4file__T17statTimeToStdTimeVai97ZQzFNaNbNfKxS4core3sys5posixQk4stat6stat_tZSQDc8datetime7systime7SysTime@Base 12
+ _D3std4file__T17statTimeToStdTimeVai99ZQzFNaNbNfKxS4core3sys5posixQk4stat6stat_tZSQDc8datetime7systime7SysTime@Base 12
+ _D3std4file__T5isDirTAxaZQlFNdNfQlZb@Base 12
+ _D3std4file__T5isDirTAyaZQlFNdNfQlZb@Base 12
+ _D3std4file__T5rmdirTAyaZQlFNfQjZv@Base 12
+ _D3std4file__T5rmdirTAyaZQlFQhZ12trustedRmdirFNbNiNeMPxaZb@Base 12
+ _D3std4file__T6existsTAxaZQmFNbNiNfQnZb@Base 12
+ _D3std4file__T6existsTAyaZQmFNbNiNfQnZb@Base 12
+ _D3std4file__T6isFileTAyaZQmFNdNfQlZb@Base 12
+ _D3std4file__T6removeTAyaZQmFNfQjZv@Base 12
+ _D3std4file__T8cenforceTPS4core3sys5posix6dirent3DIRZQBnFNfQBjMLAxaAyamZQBw@Base 12
+ _D3std4file__T8cenforceTPaZQnFNfQiMLAxaAyamZQu@Base 12
+ _D3std4file__T8cenforceTbZQmFNebMAxaMPxaAyamZb@Base 12
+ _D3std4file__T8cenforceTbZQmFNfbMLAxaAyamZb@Base 12
+ _D3std4file__T8readLinkTAyaZQoFNfQjZQm@Base 12
+ _D3std4json11__moduleRefZ@Base 12
+ _D3std4json12__ModuleInfoZ@Base 12
+ _D3std4json13JSONException6__ctorMFNaNbNfAyaQdmZCQBvQBuQBs@Base 12
+ _D3std4json13JSONException6__ctorMFNaNbNfAyaiiZCQBuQBtQBr@Base 12
+ _D3std4json13JSONException6__initZ@Base 12
+ _D3std4json13JSONException6__vtblZ@Base 12
+ _D3std4json13JSONException7__ClassZ@Base 12
+ _D3std4json16JSONFloatLiteral6__initZ@Base 12
+ _D3std4json6toJSONFNfKxSQwQu9JSONValueIbIEQBoQBn11JSONOptionsZAya@Base 12
+ _D3std4json9JSONValue10arrayNoRefMNgFNaNdNeZNgASQBuQBtQBr@Base 12
+ _D3std4json9JSONValue11objectNoRefMNgFNaNdNeZNgHAyaSQByQBxQBv@Base 12
+ _D3std4json9JSONValue14toPrettyStringMxFNfIEQBqQBp11JSONOptionsZAya@Base 12
+ _D3std4json9JSONValue3strMFNaNbNdNiNjNeNkMAyaZQe@Base 12
+ _D3std4json9JSONValue3strMxFNaNdNjNeZAya@Base 12
+ _D3std4json9JSONValue4typeMxFNaNbNdNiNfZEQBnQBm8JSONType@Base 12
+ _D3std4json9JSONValue5Store6__initZ@Base 12
+ _D3std4json9JSONValue5arrayMFNaNbNdNiNlNeNkMASQBsQBrQBpZQm@Base 12
+ _D3std4json9JSONValue5arrayMNgFNaNcNdNjZNgASQBqQBpQBn@Base 12
+ _D3std4json9JSONValue6__initZ@Base 12
+ _D3std4json9JSONValue6isNullMxFNaNbNdNiNfZb@Base 12
+ _D3std4json9JSONValue6objectMFNaNbNdNiNeNkMHAyaSQBuQBtQBrZQp@Base 12
+ _D3std4json9JSONValue6objectMNgFNaNcNdNjZNgHAyaSQBuQBtQBr@Base 12
+ _D3std4json9JSONValue7booleanMFNaNbNdNiNfbZb@Base 12
+ _D3std4json9JSONValue7booleanMxFNaNdNfZb@Base 12
+ _D3std4json9JSONValue7integerMFNaNbNdNiNflZl@Base 12
+ _D3std4json9JSONValue7integerMxFNaNdNfZl@Base 12
+ _D3std4json9JSONValue7opApplyMFMDFAyaKSQBlQBkQBiZiZi@Base 12
+ _D3std4json9JSONValue7opApplyMFMDFmKSQBjQBiQBgZiZi@Base 12
+ _D3std4json9JSONValue7opIndexMNgFNaNcNfNkMAyaZNgSQBvQBuQBs@Base 12
+ _D3std4json9JSONValue7opIndexMNgFNaNcNfmZNgSQBqQBpQBn@Base 12
+ _D3std4json9JSONValue8floatingMFNaNbNdNiNfdZd@Base 12
+ _D3std4json9JSONValue8floatingMxFNaNdNfZd@Base 12
+ _D3std4json9JSONValue8opEqualsMxFNaNbNiNeKxSQBqQBpQBnZb@Base 12
+ _D3std4json9JSONValue8opEqualsMxFNaNbNiNfxSQBpQBoQBmZb@Base 12
+ _D3std4json9JSONValue8toStringMxFNfIEQBjQBi11JSONOptionsZAya@Base 12
+ _D3std4json9JSONValue8uintegerMFNaNbNdNiNfmZm@Base 12
+ _D3std4json9JSONValue8uintegerMxFNaNdNfZm@Base 12
+ _D3std4json9JSONValue__T6assignTASQBgQBfQBdZQuMFNaNbNiNfQyZv@Base 12
+ _D3std4json9JSONValue__T6assignTAyaZQmMFNaNbNiNfQqZv@Base 12
+ _D3std4json9JSONValue__T6assignTHAyaSQBjQBiQBgZQxMFNaNbNiNfQBbZv@Base 12
+ _D3std4json9JSONValue__T6assignTbZQkMFNaNbNiNfbZv@Base 12
+ _D3std4json9JSONValue__T6assignTdZQkMFNaNbNiNfdZv@Base 12
+ _D3std4json9JSONValue__T6assignTlZQkMFNaNbNiNflZv@Base 12
+ _D3std4json9JSONValue__T6assignTmZQkMFNaNbNiNfmZv@Base 12
+ _D3std4json__T6toJSONTSQv5array__T8AppenderTAyaZQoZQBlFNfKQBkKxSQCkQCj9JSONValueIbIEQDeQDd11JSONOptionsZv@Base 12
+ _D3std4math10operations11__moduleRefZ@Base 12
+ _D3std4math10operations12__ModuleInfoZ@Base 12
+ _D3std4math10operations13getNaNPayloadFNaNbNiNeeZm@Base 12
+ _D3std4math10operations3NaNFNaNbNiNemZe@Base 12
+ _D3std4math10operations3fmaFNaNbNiNfeeeZe@Base 12
+ _D3std4math10operations4fdimFNaNbNiNfeeZe@Base 12
+ _D3std4math10operations6nextUpFNaNbNiNedZd@Base 12
+ _D3std4math10operations6nextUpFNaNbNiNeeZe@Base 12
+ _D3std4math10operations6nextUpFNaNbNiNefZf@Base 12
+ _D3std4math10operations8nextDownFNaNbNiNfdZd@Base 12
+ _D3std4math10operations8nextDownFNaNbNiNfeZe@Base 12
+ _D3std4math10operations8nextDownFNaNbNiNffZf@Base 12
+ _D3std4math10operations__T17extractBitpatternTdZQwFNaNbNiNexdZSQCjQCiQCg__T23FloatingPointBitpatternTdZQBc@Base 12
+ _D3std4math10operations__T17extractBitpatternTeZQwFNaNbNiNexeZSQCjQCiQCg__T23FloatingPointBitpatternTeZQBc@Base 12
+ _D3std4math10operations__T23FloatingPointBitpatternTdZQBc6__initZ@Base 12
+ _D3std4math10operations__T23FloatingPointBitpatternTeZQBc6__initZ@Base 12
+ _D3std4math11__moduleRefZ@Base 12
+ _D3std4math11exponential10logCoeffsPyG7e@Base 12
+ _D3std4math11exponential10logCoeffsQyG7e@Base 12
+ _D3std4math11exponential10logCoeffsRyG4e@Base 12
+ _D3std4math11exponential10logCoeffsSyG4e@Base 12
+ _D3std4math11exponential11__moduleRefZ@Base 12
+ _D3std4math11exponential12__ModuleInfoZ@Base 12
+ _D3std4math11exponential3expFNaNbNiNeeZe@Base 12
+ _D3std4math11exponential3expFNaNbNiNfdZd@Base 12
+ _D3std4math11exponential3expFNaNbNiNffZf@Base 12
+ _D3std4math11exponential3logFNaNbNiNfeZe@Base 12
+ _D3std4math11exponential4exp2FNaNbNiNeeZe@Base 12
+ _D3std4math11exponential4exp2FNaNbNiNfdZd@Base 12
+ _D3std4math11exponential4exp2FNaNbNiNffZf@Base 12
+ _D3std4math11exponential4log2FNaNbNiNfeZe@Base 12
+ _D3std4math11exponential4logbFNbNiNeeZe@Base 12
+ _D3std4math11exponential5expm1FNaNbNiNeeZe@Base 12
+ _D3std4math11exponential5expm1FNaNbNiNfdZd@Base 12
+ _D3std4math11exponential5expm1FNaNbNiNffZf@Base 12
+ _D3std4math11exponential5ldexpFNaNbNiNfdiZd@Base 12
+ _D3std4math11exponential5ldexpFNaNbNiNfeiZe@Base 12
+ _D3std4math11exponential5ldexpFNaNbNiNffiZf@Base 12
+ _D3std4math11exponential5log10FNaNbNiNfeZe@Base 12
+ _D3std4math11exponential5log1pFNaNbNiNfeZe@Base 12
+ _D3std4math11exponential6scalbnFNaNbNiNfdiZd@Base 12
+ _D3std4math11exponential6scalbnFNaNbNiNfeiZe@Base 12
+ _D3std4math11exponential6scalbnFNaNbNiNffiZf@Base 12
+ _D3std4math11exponential__T3powTdTdZQjFNaNbNiNeddZ4implFNaNbNiNfeeZe@Base 12
+ _D3std4math11exponential__T3powTdTdZQjFNaNbNiNeddZd@Base 12
+ _D3std4math11exponential__T3powTeTeZQjFNaNbNiNeeeZ4implFNaNbNiNfeeZe@Base 12
+ _D3std4math11exponential__T3powTeTeZQjFNaNbNiNeeeZe@Base 12
+ _D3std4math11exponential__T3powTeTiZQjFNaNbNiNeeiZe@Base 12
+ _D3std4math11exponential__T3powTeTlZQjFNaNbNiNeelZe@Base 12
+ _D3std4math11exponential__T3powTiTiZQjFNaNbNiNeiiZi@Base 12
+ _D3std4math11exponential__T3powTmTmZQjFNaNbNiNemmZm@Base 12
+ _D3std4math11exponential__T5frexpTeZQjFNaNbNiNexeJiZe@Base 12
+ _D3std4math11exponential__T7expImplTdZQlFNaNbNiNfdZ1PyG3d@Base 12
+ _D3std4math11exponential__T7expImplTdZQlFNaNbNiNfdZ1QyG4d@Base 12
+ _D3std4math11exponential__T7expImplTdZQlFNaNbNiNfdZd@Base 12
+ _D3std4math11exponential__T7expImplTeZQlFNaNbNiNfeZ1PyG3e@Base 12
+ _D3std4math11exponential__T7expImplTeZQlFNaNbNiNfeZ1QyG4e@Base 12
+ _D3std4math11exponential__T7expImplTeZQlFNaNbNiNfeZe@Base 12
+ _D3std4math11exponential__T7expImplTfZQlFNaNbNiNffZ1PyG6f@Base 12
+ _D3std4math11exponential__T7expImplTfZQlFNaNbNiNffZf@Base 12
+ _D3std4math11exponential__T8exp2ImplTdZQmFNaNbNiNfdZ1PyG3d@Base 12
+ _D3std4math11exponential__T8exp2ImplTdZQmFNaNbNiNfdZ1QyG3d@Base 12
+ _D3std4math11exponential__T8exp2ImplTdZQmFNaNbNiNfdZd@Base 12
+ _D3std4math11exponential__T8exp2ImplTeZQmFNaNbNiNfeZ1PyG3e@Base 12
+ _D3std4math11exponential__T8exp2ImplTeZQmFNaNbNiNfeZ1QyG4e@Base 12
+ _D3std4math11exponential__T8exp2ImplTeZQmFNaNbNiNfeZe@Base 12
+ _D3std4math11exponential__T8exp2ImplTfZQmFNaNbNiNffZ1PyG6f@Base 12
+ _D3std4math11exponential__T8exp2ImplTfZQmFNaNbNiNffZf@Base 12
+ _D3std4math11exponential__T9expm1ImplTdZQnFNaNbNiNfdZ1PyG3d@Base 12
+ _D3std4math11exponential__T9expm1ImplTdZQnFNaNbNiNfdZ1QyG4d@Base 12
+ _D3std4math11exponential__T9expm1ImplTdZQnFNaNbNiNfdZd@Base 12
+ _D3std4math11exponential__T9expm1ImplTeZQnFNaNbNiNfeZ1PyG5e@Base 12
+ _D3std4math11exponential__T9expm1ImplTeZQnFNaNbNiNfeZ1QyG6e@Base 12
+ _D3std4math11exponential__T9expm1ImplTeZQnFNaNbNiNfeZe@Base 12
+ _D3std4math12__ModuleInfoZ@Base 12
+ _D3std4math12trigonometry11__moduleRefZ@Base 12
+ _D3std4math12trigonometry12__ModuleInfoZ@Base 12
+ _D3std4math12trigonometry3cosFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry3cosFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry3cosFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry3sinFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry3sinFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry3sinFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry3tanFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry3tanFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry3tanFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4acosFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4acosFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4acosFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4asinFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4asinFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4asinFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4atanFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4atanFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4atanFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4coshFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4coshFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4coshFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4sinhFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4sinhFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4sinhFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry4tanhFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry4tanhFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry4tanhFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry5acoshFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry5acoshFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry5acoshFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry5asinhFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry5asinhFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry5asinhFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry5atan2FNaNbNiNeeeZe@Base 12
+ _D3std4math12trigonometry5atan2FNaNbNiNfddZd@Base 12
+ _D3std4math12trigonometry5atan2FNaNbNiNfffZf@Base 12
+ _D3std4math12trigonometry5atanhFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry5atanhFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry5atanhFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T5_sinhTdZQjFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T5_sinhTeZQjFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T5_sinhTfZQjFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T5_tanhTdZQjFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T5_tanhTeZQjFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T5_tanhTfZQjFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T6_acoshTdZQkFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T6_acoshTeZQkFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T6_acoshTfZQkFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T6_asinhTdZQkFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T6_asinhTeZQkFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T6_asinhTfZQkFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T7tanImplTdZQlFNaNbNiNfdZ1PyG3d@Base 12
+ _D3std4math12trigonometry__T7tanImplTdZQlFNaNbNiNfdZ1QyG5d@Base 12
+ _D3std4math12trigonometry__T7tanImplTdZQlFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T7tanImplTeZQlFNaNbNiNfeZ1PyG3e@Base 12
+ _D3std4math12trigonometry__T7tanImplTeZQlFNaNbNiNfeZ1QyG5e@Base 12
+ _D3std4math12trigonometry__T7tanImplTeZQlFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T7tanImplTfZQlFNaNbNiNffZ1PyG6f@Base 12
+ _D3std4math12trigonometry__T7tanImplTfZQlFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T8atanImplTdZQmFNaNbNiNfdZ1PyG5d@Base 12
+ _D3std4math12trigonometry__T8atanImplTdZQmFNaNbNiNfdZ1QyG6d@Base 12
+ _D3std4math12trigonometry__T8atanImplTdZQmFNaNbNiNfdZd@Base 12
+ _D3std4math12trigonometry__T8atanImplTeZQmFNaNbNiNfeZ1PyG5e@Base 12
+ _D3std4math12trigonometry__T8atanImplTeZQmFNaNbNiNfeZ1QyG6e@Base 12
+ _D3std4math12trigonometry__T8atanImplTeZQmFNaNbNiNfeZe@Base 12
+ _D3std4math12trigonometry__T8atanImplTfZQmFNaNbNiNffZ1PyG4f@Base 12
+ _D3std4math12trigonometry__T8atanImplTfZQmFNaNbNiNffZf@Base 12
+ _D3std4math12trigonometry__T9atan2ImplTdZQnFNaNbNiNfddZd@Base 12
+ _D3std4math12trigonometry__T9atan2ImplTeZQnFNaNbNiNfeeZe@Base 12
+ _D3std4math12trigonometry__T9atan2ImplTfZQnFNaNbNiNfffZf@Base 12
+ _D3std4math6traits11__moduleRefZ@Base 12
+ _D3std4math6traits11isIdenticalFNaNbNiNeeeZb@Base 12
+ _D3std4math6traits12__ModuleInfoZ@Base 12
+ _D3std4math6traits__T10isInfinityTdZQpFNaNbNiNedZb@Base 12
+ _D3std4math6traits__T10isInfinityTeZQpFNaNbNiNeeZb@Base 12
+ _D3std4math6traits__T10isInfinityTfZQpFNaNbNiNefZb@Base 12
+ _D3std4math6traits__T10isInfinityTxdZQqFNaNbNiNexdZb@Base 12
+ _D3std4math6traits__T10isPowerOf2TkZQpFNaNbNiNfxkZb@Base 12
+ _D3std4math6traits__T10isPowerOf2TmZQpFNaNbNiNfxmZb@Base 12
+ _D3std4math6traits__T11isSubnormalTxeZQrFNaNbNiNexeZb@Base 12
+ _D3std4math6traits__T5isNaNTdZQjFNaNbNiNedZb@Base 12
+ _D3std4math6traits__T5isNaNTeZQjFNaNbNiNeeZb@Base 12
+ _D3std4math6traits__T5isNaNTfZQjFNaNbNiNefZb@Base 12
+ _D3std4math6traits__T5isNaNTxdZQkFNaNbNiNexdZb@Base 12
+ _D3std4math6traits__T5isNaNTxeZQkFNaNbNiNexeZb@Base 12
+ _D3std4math6traits__T7signbitTdZQlFNaNbNiNedZi@Base 12
+ _D3std4math6traits__T7signbitTeZQlFNaNbNiNeeZi@Base 12
+ _D3std4math6traits__T7signbitTfZQlFNaNbNiNefZi@Base 12
+ _D3std4math6traits__T7signbitTxeZQmFNaNbNiNexeZi@Base 12
+ _D3std4math6traits__T7signbitTyeZQmFNaNbNiNeyeZi@Base 12
+ _D3std4math6traits__T8copysignTdTdZQoFNaNbNiNeddZd@Base 12
+ _D3std4math6traits__T8copysignTdTiZQoFNaNbNiNeidZd@Base 12
+ _D3std4math6traits__T8copysignTeTdZQoFNaNbNiNeedZe@Base 12
+ _D3std4math6traits__T8copysignTeTeZQoFNaNbNiNeeeZe@Base 12
+ _D3std4math6traits__T8copysignTeTfZQoFNaNbNiNeefZe@Base 12
+ _D3std4math6traits__T8copysignTeTiZQoFNaNbNiNeieZe@Base 12
+ _D3std4math6traits__T8copysignTfTfZQoFNaNbNiNeffZf@Base 12
+ _D3std4math6traits__T8copysignTfTiZQoFNaNbNiNeifZf@Base 12
+ _D3std4math8hardware11__moduleRefZ@Base 12
+ _D3std4math8hardware12__ModuleInfoZ@Base 12
+ _D3std4math8hardware14resetIeeeFlagsFNbNiNeZv@Base 12
+ _D3std4math8hardware20FloatingPointControl10initializeMFNbNiNfZv@Base 12
+ _D3std4math8hardware20FloatingPointControl15clearExceptionsFNbNiNfZv@Base 12
+ _D3std4math8hardware20FloatingPointControl15getControlStateFNaNbNiNeZt@Base 12
+ _D3std4math8hardware20FloatingPointControl15setControlStateFNbNiNetZv@Base 12
+ _D3std4math8hardware20FloatingPointControl16enableExceptionsMFNbNiNekZv@Base 12
+ _D3std4math8hardware20FloatingPointControl17disableExceptionsMFNbNiNekZv@Base 12
+ _D3std4math8hardware20FloatingPointControl17enabledExceptionsFNaNbNdNiNeZk@Base 12
+ _D3std4math8hardware20FloatingPointControl17hasExceptionTrapsFNaNbNdNiNfZb@Base 12
+ _D3std4math8hardware20FloatingPointControl6__dtorMFNbNiNeZv@Base 12
+ _D3std4math8hardware20FloatingPointControl6__initZ@Base 12
+ _D3std4math8hardware20FloatingPointControl8opAssignMFNbNcNiNjNeSQCkQCjQChQCbZQo@Base 12
+ _D3std4math8hardware20FloatingPointControl8roundingFNaNbNdNiNeZk@Base 12
+ _D3std4math8hardware20FloatingPointControl8roundingMFNbNdNiNekZv@Base 12
+ _D3std4math8hardware9IeeeFlags12getIeeeFlagsFNaNbNiNeZk@Base 12
+ _D3std4math8hardware9IeeeFlags14resetIeeeFlagsFNbNiNeZv@Base 12
+ _D3std4math8hardware9IeeeFlags6__initZ@Base 12
+ _D3std4math8hardware9IeeeFlags7inexactMxFNbNdNiNfZb@Base 12
+ _D3std4math8hardware9IeeeFlags7invalidMxFNbNdNiNfZb@Base 12
+ _D3std4math8hardware9IeeeFlags8overflowMxFNbNdNiNfZb@Base 12
+ _D3std4math8hardware9IeeeFlags9divByZeroMxFNbNdNiNfZb@Base 12
+ _D3std4math8hardware9IeeeFlags9underflowMxFNbNdNiNfZb@Base 12
+ _D3std4math8hardware9ieeeFlagsFNaNbNdNiNeZSQBpQBoQBm9IeeeFlags@Base 12
+ _D3std4math8rounding11__moduleRefZ@Base 12
+ _D3std4math8rounding12__ModuleInfoZ@Base 12
+ _D3std4math8rounding4ceilFNaNbNiNedZd@Base 12
+ _D3std4math8rounding4ceilFNaNbNiNeeZe@Base 12
+ _D3std4math8rounding4ceilFNaNbNiNefZf@Base 12
+ _D3std4math8rounding4rintFNaNbNiNfdZd@Base 12
+ _D3std4math8rounding4rintFNaNbNiNfeZe@Base 12
+ _D3std4math8rounding4rintFNaNbNiNffZf@Base 12
+ _D3std4math8rounding5floorFNaNbNiNedZd@Base 12
+ _D3std4math8rounding5floorFNaNbNiNeeZe@Base 12
+ _D3std4math8rounding5floorFNaNbNiNefZf@Base 12
+ _D3std4math8rounding5lrintFNaNbNiNeeZl@Base 12
+ _D3std4math8rounding5roundFNaNbNiNeeZe@Base 12
+ _D3std4math8rounding5truncFNaNbNiNeeZe@Base 12
+ _D3std4math8rounding6lroundFNbNiNeeZl@Base 12
+ _D3std4math8rounding6rndtolFNaNbNiNfdZl@Base 12
+ _D3std4math8rounding6rndtolFNaNbNiNfeZl@Base 12
+ _D3std4math8rounding6rndtolFNaNbNiNffZl@Base 12
+ _D3std4math8rounding9nearbyintFNaNbNiNfeZe@Base 12
+ _D3std4math8rounding__T9floorImplTdZQnFNaNbNiNexdZ9floatBits6__initZ@Base 12
+ _D3std4math8rounding__T9floorImplTdZQnFNaNbNiNexdZd@Base 12
+ _D3std4math8rounding__T9floorImplTeZQnFNaNbNiNexeZ9floatBits6__initZ@Base 12
+ _D3std4math8rounding__T9floorImplTeZQnFNaNbNiNexeZe@Base 12
+ _D3std4math8rounding__T9floorImplTfZQnFNaNbNiNexfZ9floatBits6__initZ@Base 12
+ _D3std4math8rounding__T9floorImplTfZQnFNaNbNiNexfZf@Base 12
+ _D3std4math9algebraic11__moduleRefZ@Base 12
+ _D3std4math9algebraic12__ModuleInfoZ@Base 12
+ _D3std4math9algebraic4cbrtFNbNiNeeZe@Base 12
+ _D3std4math9algebraic4fabsFNaNbNiNfdZd@Base 12
+ _D3std4math9algebraic4fabsFNaNbNiNfeZe@Base 12
+ _D3std4math9algebraic4fabsFNaNbNiNffZf@Base 12
+ _D3std4math9algebraic4sqrtFNaNbNiNfdZd@Base 12
+ _D3std4math9algebraic4sqrtFNaNbNiNfeZe@Base 12
+ _D3std4math9algebraic4sqrtFNaNbNiNffZf@Base 12
+ _D3std4math9algebraic8polyImplFNaNbNiNeeIAeZe@Base 12
+ _D3std4math9algebraic__T12polyImplBaseTeTeZQtFNaNbNiNeeIAeZe@Base 12
+ _D3std4math9algebraic__T4polyTdTdVii3ZQoFNaNbNiNfdKxG3dZd@Base 12
+ _D3std4math9algebraic__T4polyTdTdVii4ZQoFNaNbNiNfdKxG4dZd@Base 12
+ _D3std4math9algebraic__T4polyTdTdVii5ZQoFNaNbNiNfdKxG5dZd@Base 12
+ _D3std4math9algebraic__T4polyTdTdVii6ZQoFNaNbNiNfdKxG6dZd@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii3ZQoFNaNbNiNfeKxG3eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii4ZQoFNaNbNiNfeKxG4eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii5ZQoFNaNbNiNfeKxG5eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii6ZQoFNaNbNiNfeKxG6eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii7ZQoFNaNbNiNfeKxG7eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii8ZQoFNaNbNiNfeKxG8eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeVii9ZQoFNaNbNiNfeKxG9eZe@Base 12
+ _D3std4math9algebraic__T4polyTeTeZQkFNaNbNiNeeIAeZe@Base 12
+ _D3std4math9algebraic__T4polyTfTfVii6ZQoFNaNbNiNffKxG6fZf@Base 12
+ _D3std4math9algebraic__T4polyTxdTdVii3ZQpFNaNbNiNfxdKxG3dZd@Base 12
+ _D3std4math9algebraic__T4polyTxdTdVii4ZQpFNaNbNiNfxdKxG4dZd@Base 12
+ _D3std4math9algebraic__T4polyTxdTdVii5ZQpFNaNbNiNfxdKxG5dZd@Base 12
+ _D3std4math9algebraic__T4polyTxeTeVii3ZQpFNaNbNiNfxeKxG3eZe@Base 12
+ _D3std4math9algebraic__T4polyTxeTeVii4ZQpFNaNbNiNfxeKxG4eZe@Base 12
+ _D3std4math9algebraic__T4polyTxeTeVii5ZQpFNaNbNiNfxeKxG5eZe@Base 12
+ _D3std4math9algebraic__T4polyTxeTeVii6ZQpFNaNbNiNfxeKxG6eZe@Base 12
+ _D3std4math9algebraic__T4polyTxfTfVii4ZQpFNaNbNiNfxfKxG4fZf@Base 12
+ _D3std4math9algebraic__T4polyTxfTfVii6ZQpFNaNbNiNfxfKxG6fZf@Base 12
+ _D3std4math9algebraic__T4polyTyeTeVii7ZQpFNaNbNiNfyeKxG7eZe@Base 12
+ _D3std4math9algebraic__T8nextPow2TmZQmFNaNbNiNfxmZm@Base 12
+ _D3std4math9algebraic__T9truncPow2TmZQnFNaNbNiNfxmZm@Base 12
+ _D3std4math9constants11__moduleRefZ@Base 12
+ _D3std4math9constants12__ModuleInfoZ@Base 12
+ _D3std4math9remainder11__moduleRefZ@Base 12
+ _D3std4math9remainder12__ModuleInfoZ@Base 12
+ _D3std4math9remainder4fmodFNbNiNeeeZe@Base 12
+ _D3std4math9remainder4modfFNbNiNeeKeZe@Base 12
+ _D3std4math9remainder6remquoFNbNiNeeeJiZe@Base 12
+ _D3std4math9remainderQkFNbNiNeeeZe@Base 12
+ _D3std4math__T8ieeeMeanTeZQmFNaNbNiNexexeZe@Base 12
+ _D3std4meta11__moduleRefZ@Base 12
+ _D3std4meta12__ModuleInfoZ@Base 12
+ _D3std4path11__moduleRefZ@Base 12
+ _D3std4path11expandTildeFNbNfAyaZ18expandFromDatabaseFNbNfQBdZQBh@Base 12
+ _D3std4path11expandTildeFNbNfAyaZ21combineCPathWithDPathFNaNbNePaQBkmZQBp@Base 12
+ _D3std4path11expandTildeFNbNfAyaZ21expandFromEnvironmentFNbNfQBgZ9__lambda2FNbNiNeZPa@Base 12
+ _D3std4path11expandTildeFNbNfAyaZ21expandFromEnvironmentFNbNfQBgZQBk@Base 12
+ _D3std4path11expandTildeFNbNfAyaZQe@Base 12
+ _D3std4path12__ModuleInfoZ@Base 12
+ _D3std4path12absolutePathFNaNfAyaLQeZQh@Base 12
+ _D3std4path14isDirSeparatorFNaNbNiNfwZb@Base 12
+ _D3std4path16isDriveSeparatorFNaNbNiNfwZb@Base 12
+ _D3std4path__T10stripDriveTxaZQqFNaNbNiNfAxaZQe@Base 12
+ _D3std4path__T10stripDriveTyaZQqFNaNbNiNfAyaZQe@Base 12
+ _D3std4path__T11_stripDriveTAxaZQsFNaNbNiNfQpZQs@Base 12
+ _D3std4path__T11_stripDriveTAyaZQsFNaNbNiNfQpZQs@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFNaNbNiNfQFhZSQGnQGm__TQGkTQFzZQGsFQGhZ12PathSplitter@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter11__xopEqualsMxFKxSQHlQHk__TQHiTQGxZQHqFQHfZQCgZb@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter4backMFNaNbNdNiNfZQGj@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter4saveMFNaNbNdNiNfZSQHlQHk__TQHiTQGxZQHqFQHfZQCg@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter5frontMFNaNbNdNiNfZQGk@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter5ltrimMFNaNbNiNfmmZm@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter5rtrimMFNaNbNiNfmmZm@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter6__ctorMFNaNbNcNiNfQGkZSQHqQHp__TQHnTQHcZQHvFQHkZQCl@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter6__initZ@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter7popBackMFNaNbNiNfZv@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4path__T12pathSplitterTSQBc5range__T5chainTSQBw3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDvQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFQEzZ12PathSplitter9__xtoHashFNbNeKxSQHkQHj__TQHhTQGwZQHpFQHeZQCfZm@Base 12
+ _D3std4path__T13lastSeparatorTAxaZQuFNaNbNiNfQpZl@Base 12
+ _D3std4path__T13lastSeparatorTAyaZQuFNaNbNiNfQpZl@Base 12
+ _D3std4path__T15extSeparatorPosTAyaZQwFNaNbNiNfxAyaZl@Base 12
+ _D3std4path__T15filenameCharCmpVEQBfQBe13CaseSensitivei1ZQBrFNaNbNiNfwwZi@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNaNbNiNfNkMQFkZSQGuQGt__TQGrTQGcZQGzFNkMQGnZQCe@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt11__xopEqualsMxFKxSQHgQHf__TQHdTQGoZQHlFNkMQGzZQCqZb@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt11getElement0MFNaNbNiNfZa@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt4saveMFNaNbNdNiNfZSQHgQHf__TQHdTQGoZQHlFNkMQGzZQCq@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt5frontMFNaNbNdNiNfZa@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt5isDotFNaNbNiNfQFxZb@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt6__ctorMFNaNbNcNiNfQGbZSQHlQHk__TQHiTQGtZQHqFNkMQHeZQCv@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt6__initZ@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt8isDotDotFNaNbNiNfQGaZb@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt8popFrontMFNaNbNiNfZv@Base 12
+ _D3std4path__T16asNormalizedPathTSQBg5range__T5chainTSQCa3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDzQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQt9__xtoHashFNbNeKxSQHfQHe__TQHcTQGnZQHkFNkMQGyZQCpZm@Base 12
+ _D3std4path__T18rtrimDirSeparatorsTAxaZQzFNaNbNiNfQpZQs@Base 12
+ _D3std4path__T18rtrimDirSeparatorsTAyaZQzFNaNbNiNfQpZQs@Base 12
+ _D3std4path__T7dirNameTxaZQmFNaNbNiNfNkMAxaZQe@Base 12
+ _D3std4path__T7dirNameTyaZQmFNaNbNiNfNkMAyaZQe@Base 12
+ _D3std4path__T8_dirNameTAxaZQoFNaNbNiNfNkMQsZQv@Base 12
+ _D3std4path__T8_dirNameTAxaZQoFNkMQkZ6resultFNaNbNiNfbQBeZQBi@Base 12
+ _D3std4path__T8_dirNameTAyaZQoFNaNbNiNfNkMQsZQv@Base 12
+ _D3std4path__T8_dirNameTAyaZQoFNkMQkZ6resultFNaNbNiNfbQBeZQBi@Base 12
+ _D3std4path__T8baseNameTxaZQnFNaNbNiNfNkMAxaZQe@Base 12
+ _D3std4path__T8baseNameTyaZQnFNaNbNiNfNkMAyaZQe@Base 12
+ _D3std4path__T8isRootedTAxaZQoFNaNbNiNfQpZb@Base 12
+ _D3std4path__T8isRootedTAyaZQoFNaNbNiNfQpZb@Base 12
+ _D3std4path__T8isRootedTSQx5range__T5chainTSQBq3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDpQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFeFNaNbNiNfQFgZb@Base 12
+ _D3std4path__T8rootNameTSQx5range__T5chainTSQBq3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDpQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFeFNaNbNiNfQFgZQFk@Base 12
+ _D3std4path__T9_baseNameTAxaZQpFNaNbNiNfNkMQsZQv@Base 12
+ _D3std4path__T9_baseNameTAyaZQpFNaNbNiNfNkMQsZQv@Base 12
+ _D3std4path__T9_rootNameTSQy5range__T5chainTSQBr3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDqQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFfFNaNbNiNfQFgZQFk@Base 12
+ _D3std4path__T9buildPathTAAxaZQqFMQjZ__T11trustedCastTAyaTAaZQvFNaNbNiNeQoZQv@Base 12
+ _D3std4path__T9buildPathTAAxaZQqFNaNbNfMQpZAya@Base 12
+ _D3std4path__T9buildPathTaZQnFNaNbNfAAxaXAya@Base 12
+ _D3std4path__T9chainPathTAaTAxaZQsFNaNbNiNfQsQrZSQBv5range__T5chainTSQCp3utf__T10byCodeUnitTQCpZQrFQCwZ14ByCodeUnitImplTSQEpQCu__T10OnlyResultTaZQpTSQFrQDc__TQDbTQFeZQDjFQFmZQCtZQEnFQEkQCnQBoZ6Result@Base 12
+ _D3std4path__T9chainPathTAxaTQeZQsFNaNbNiNfQsQuZSQBv5range__T5chainTSQCp3utf__T10byCodeUnitTQCpZQrFQCwZ14ByCodeUnitImplTSQEpQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6Result@Base 12
+ _D3std4path__T9chainPathTAyaTQeZQsFNaNbNiNfQsQuZSQBv5range__T5chainTSQCp3utf__T10byCodeUnitTQCpZQrFQCwZ14ByCodeUnitImplTSQEpQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6Result@Base 12
+ _D3std4path__T9extensionTAyaZQpFNaNbNiNfQpZQs@Base 12
+ _D3std4path__T9globMatchVEQyQw13CaseSensitivei1TaTAyaZQBoFNaNbNfQoAxaZb@Base 12
+ _D3std4path__T9globMatchVEQyQw13CaseSensitivei1TaTSQBx3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDjFNaNbNfQCjAxaZb@Base 12
+ _D3std4uuid10randomUUIDFNfZSQBaQz4UUID@Base 12
+ _D3std4uuid11__moduleRefZ@Base 12
+ _D3std4uuid12__ModuleInfoZ@Base 12
+ _D3std4uuid20UUIDParsingException6__ctorMFNaNeAyamEQBxQBwQBu6ReasonQvC6object9ThrowableQBpmZCQDnQDmQDk@Base 12
+ _D3std4uuid20UUIDParsingException6__initZ@Base 12
+ _D3std4uuid20UUIDParsingException6__vtblZ@Base 12
+ _D3std4uuid20UUIDParsingException7__ClassZ@Base 12
+ _D3std4uuid4UUID11uuidVersionMxFNaNbNdNiNfZEQBqQBpQBn7Version@Base 12
+ _D3std4uuid4UUID4swapMFNaNbNiNfKSQBfQBeQBcZv@Base 12
+ _D3std4uuid4UUID5emptyMxFNaNbNdNiNeZb@Base 12
+ _D3std4uuid4UUID5opCmpMxFNaNbNiNfKxSQBiQBhQBfZi@Base 12
+ _D3std4uuid4UUID5opCmpMxFNaNbNiNfxSQBhQBgQBeZi@Base 12
+ _D3std4uuid4UUID6__ctorMFNaNbNcNiNfKxG16hZSQBpQBoQBm@Base 12
+ _D3std4uuid4UUID6__ctorMFNaNbNcNiNfxG16hZSQBoQBnQBl@Base 12
+ _D3std4uuid4UUID6__initZ@Base 12
+ _D3std4uuid4UUID6toHashMxFNaNbNiNfZm@Base 12
+ _D3std4uuid4UUID7Version6__initZ@Base 12
+ _D3std4uuid4UUID7variantMxFNaNbNdNiNfZEQBlQBkQBi7Variant@Base 12
+ _D3std4uuid4UUID8opAssignMFNaNbNiNfKxSQBkQBjQBhZSQBvQBuQBs@Base 12
+ _D3std4uuid4UUID8opAssignMFNaNbNiNfxSQBjQBiQBgZSQBuQBtQBr@Base 12
+ _D3std4uuid4UUID8opEqualsMxFNaNbNiNfKxSQBlQBkQBiZb@Base 12
+ _D3std4uuid4UUID8opEqualsMxFNaNbNiNfxSQBkQBjQBhZb@Base 12
+ _D3std4uuid4UUID8toStringMxFNaNbNeZAya@Base 12
+ _D3std4uuid4UUID__T6__ctorTaZQkMFNaNcNfIAaZSQBqQBpQBn@Base 12
+ _D3std4uuid4UUID__T6__ctorTaZQkMFNcIAaZ7skipIndyAi@Base 12
+ _D3std4uuid4UUID__T6toCharTaZQkMxFNaNbNiNfmZa@Base 12
+ _D3std4uuid4UUID__T8toStringTAaZQnMxFNaNbNiNfMQrZv@Base 12
+ _D3std4uuid4UUID__T9asArrayOfTkZQnMFNaNbNcNiNjNeZG4k@Base 12
+ _D3std4uuid7md5UUIDFNaNbNiNfxAaxSQBfQBe4UUIDZSQBsQBrQn@Base 12
+ _D3std4uuid7md5UUIDFNaNbNiNfxAhxSQBfQBe4UUIDZSQBsQBrQn@Base 12
+ _D3std4uuid8sha1UUIDFNaNbNiNfMAxaxSQBhQBg4UUIDZSQBuQBtQn@Base 12
+ _D3std4uuid8sha1UUIDFNaNbNiNfMAxhxSQBhQBg4UUIDZSQBuQBtQn@Base 12
+ _D3std4uuid__T10randomUUIDTSQBa6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFcZQGhFNaNbNiNfKQGhZSQHlQHk4UUID@Base 12
+ _D3std4zlib10UnCompress10uncompressMFAxvZQe@Base 12
+ _D3std4zlib10UnCompress5emptyMxFNdZb@Base 12
+ _D3std4zlib10UnCompress5errorMFiZv@Base 12
+ _D3std4zlib10UnCompress5flushMFZAv@Base 12
+ _D3std4zlib10UnCompress6__ctorMFEQBfQBe12HeaderFormatZCQCbQCaQBy@Base 12
+ _D3std4zlib10UnCompress6__ctorMFkZCQBhQBgQBe@Base 12
+ _D3std4zlib10UnCompress6__dtorMFZv@Base 12
+ _D3std4zlib10UnCompress6__initZ@Base 12
+ _D3std4zlib10UnCompress6__vtblZ@Base 12
+ _D3std4zlib10UnCompress7__ClassZ@Base 12
+ _D3std4zlib10uncompressFAxvmiZAv@Base 12
+ _D3std4zlib11__moduleRefZ@Base 12
+ _D3std4zlib12__ModuleInfoZ@Base 12
+ _D3std4zlib13ZlibException6__ctorMFiZCQBkQBjQBh@Base 12
+ _D3std4zlib13ZlibException6__initZ@Base 12
+ _D3std4zlib13ZlibException6__vtblZ@Base 12
+ _D3std4zlib13ZlibException6getmsgFNaNbNiNfiZAya@Base 12
+ _D3std4zlib13ZlibException7__ClassZ@Base 12
+ _D3std4zlib5crc32FkAxvZk@Base 12
+ _D3std4zlib7adler32FkAxvZk@Base 12
+ _D3std4zlib8Compress5errorMFiZv@Base 12
+ _D3std4zlib8Compress5flushMFiZAv@Base 12
+ _D3std4zlib8Compress6__ctorMFEQBcQBb12HeaderFormatZCQByQBxQBv@Base 12
+ _D3std4zlib8Compress6__ctorMFiEQBdQBc12HeaderFormatZCQBzQByQBw@Base 12
+ _D3std4zlib8Compress6__dtorMFZv@Base 12
+ _D3std4zlib8Compress6__initZ@Base 12
+ _D3std4zlib8Compress6__vtblZ@Base 12
+ _D3std4zlib8Compress7__ClassZ@Base 12
+ _D3std4zlib8Compress8compressMFAxvZQe@Base 12
+ _D3std4zlib8compressFAxvZAh@Base 12
+ _D3std4zlib8compressFAxviZAh@Base 12
+ _D3std5array11__moduleRefZ@Base 12
+ _D3std5array12__ModuleInfoZ@Base 12
+ _D3std5array__T11replaceIntoTxaTSQBfQBe__T8AppenderTAxaZQoTAyaTQeZQBzFNaNbNfQBsQBbQxQzZv@Base 12
+ _D3std5array__T11replaceIntoTyaTSQBfQBe__T8AppenderTAyaZQoTQhTQkZQByFNaNbNfQBrQBaQBdQBgZv@Base 12
+ _D3std5array__T13copyBackwardsTSQBe5regex8internal2ir10NamedGroupZQBzFNaAQBqQeZv@Base 12
+ _D3std5array__T13copyBackwardsTSQBe5regex8internal2ir8BytecodeZQBwFNaAQBnQeZv@Base 12
+ _D3std5array__T13insertInPlaceTSQBe5regex8internal2ir10NamedGroupTQBjZQCdFNaNfKAQBxmQCbZv@Base 12
+ _D3std5array__T13insertInPlaceTSQBe5regex8internal2ir8BytecodeTQBgTQBkZQCeFNaNfKAQBymQCcQCfZv@Base 12
+ _D3std5array__T13insertInPlaceTSQBe5regex8internal2ir8BytecodeTQBgZQCaFNaNfKAQBumQByZv@Base 12
+ _D3std5array__T14arrayAllocImplVbi0TASQBk3uni17CodepointIntervalTmZQCaFNaNbmZQBp@Base 12
+ _D3std5array__T14arrayAllocImplVbi0TAaTmZQBaFNaNbmZQp@Base 12
+ _D3std5array__T14arrayAllocImplVbi0TAfTmZQBaFNaNbmZQp@Base 12
+ _D3std5array__T14arrayAllocImplVbi0TAhTmZQBaFNaNbmZQp@Base 12
+ _D3std5array__T14arrayAllocImplVbi0TAkTmZQBaFNaNbmZQp@Base 12
+ _D3std5array__T18uninitializedArrayTASQBk3uni17CodepointIntervalTmZQCaFNaNbNemZQBr@Base 12
+ _D3std5array__T18uninitializedArrayTASQBk3uni17CodepointIntervalTxmZQCbFNaNbNexmZQBt@Base 12
+ _D3std5array__T18uninitializedArrayTASQBk3uni17CodepointIntervalTyiZQCbFNaNbNeyiZQBt@Base 12
+ _D3std5array__T18uninitializedArrayTAaTmZQBaFNaNbNemZQr@Base 12
+ _D3std5array__T18uninitializedArrayTAaTxmZQBbFNaNbNexmZQt@Base 12
+ _D3std5array__T18uninitializedArrayTAfTmZQBaFNaNbNemZQr@Base 12
+ _D3std5array__T18uninitializedArrayTAhTkZQBaFNaNbNekZQr@Base 12
+ _D3std5array__T18uninitializedArrayTAhTmZQBaFNaNbNemZQr@Base 12
+ _D3std5array__T18uninitializedArrayTAkTxmZQBbFNaNbNexmZQt@Base 12
+ _D3std5array__T19appenderNewCapacityVmi16ZQBbFNaNbNiNfmmZm@Base 12
+ _D3std5array__T19appenderNewCapacityVmi1ZQBaFNaNbNiNfmmZm@Base 12
+ _D3std5array__T19appenderNewCapacityVmi2ZQBaFNaNbNiNfmmZm@Base 12
+ _D3std5array__T19appenderNewCapacityVmi40ZQBbFNaNbNiNfmmZm@Base 12
+ _D3std5array__T19appenderNewCapacityVmi4ZQBaFNaNbNiNfmmZm@Base 12
+ _D3std5array__T19appenderNewCapacityVmi8ZQBaFNaNbNiNfmmZm@Base 12
+ _D3std5array__T5splitTAyaTQeZQoFNaNbNfQqQsZAQw@Base 12
+ _D3std5array__T5splitTAyaZQlFNaNfQlZAQp@Base 12
+ _D3std5array__T7overlapTvTvZQnFNaNbNiNeAvQcZQf@Base 12
+ _D3std5array__T7replaceTxaTAyaTQeZQtFNaNbNfAxaQtQvZQi@Base 12
+ _D3std5array__T7replaceTyaTAyaTQeZQtFNaNbNfQqQsQuZQx@Base 12
+ _D3std5array__T8AppenderTAAyaZQp13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAAyaZQp4Data11__xopEqualsMxFKxSQCcQCb__TQByTQBsZQCgQBsZb@Base 12
+ _D3std5array__T8AppenderTAAyaZQp4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAAyaZQp4Data9__xtoHashFNbNeKxSQCbQCa__TQBxTQBrZQCfQBrZm@Base 12
+ _D3std5array__T8AppenderTAAyaZQp4dataMNgFNaNbNdNiNeZANgAya@Base 12
+ _D3std5array__T8AppenderTAAyaZQp5clearMFNaNbNiNeZv@Base 12
+ _D3std5array__T8AppenderTAAyaZQp6__ctorMFNaNbNcNeQyZSQBzQBy__TQBvTQBpZQCd@Base 12
+ _D3std5array__T8AppenderTAAyaZQp6__initZ@Base 12
+ _D3std5array__T8AppenderTAAyaZQp7opSliceMNgFNaNbNdNiNeZANgAya@Base 12
+ _D3std5array__T8AppenderTAAyaZQp7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAAyaZQp8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAAyaZQp8shrinkToMFNaNemZv@Base 12
+ _D3std5array__T8AppenderTAAyaZQp__T3putTQoZQiMFNaNbNfQBbZv@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi4Data11__xopEqualsMxFKxSQCwQCv__TQCsTQCmZQDaQBsZb@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi4Data9__xtoHashFNbNeKxSQCvQCu__TQCrTQClZQCzQBrZm@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi4dataMNgFNaNbNdNiNeZANgCQCwQByQBx@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi5clearMFNaNbNiNeZv@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi6__ctorMFNaNbNcNeQBsZSQCuQCt__TQCqTQCkZQCy@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi6__initZ@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi7opSliceMNgFNaNbNdNiNeZANgCQCzQCbQCa@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi8shrinkToMFNaNemZv@Base 12
+ _D3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi__T3putTQBiZQjMFNaNbNfQBwZv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj4Data11__xopEqualsMxFKxSQCxQCw__TQCtTQCnZQDbQBsZb@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj4Data9__xtoHashFNbNeKxSQCwQCv__TQCsTQCmZQDaQBrZm@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj4dataMNgFNaNbNdNiNeZANgSQCxQBzQBv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj5clearMFNaNbNiNeZv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj6__ctorMFNaNbNcNeQBtZSQCvQCu__TQCrTQClZQCz@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj6__initZ@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj7opSliceMNgFNaNbNdNiNeZANgSQDaQCcQBy@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj8shrinkToMFNaNemZv@Base 12
+ _D3std5array__T8AppenderTASQz6socket11AddressInfoZQBj__T3putTQBjZQjMFNaNbNfQBxZv@Base 12
+ _D3std5array__T8AppenderTAaZQn13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAaZQn4Data11__xopEqualsMxFKxSQCaQBz__TQBwTQBqZQCeQBsZb@Base 12
+ _D3std5array__T8AppenderTAaZQn4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAaZQn4Data9__xtoHashFNbNeKxSQBzQBy__TQBvTQBpZQCdQBrZm@Base 12
+ _D3std5array__T8AppenderTAaZQn4dataMNgFNaNbNdNiNeZANga@Base 12
+ _D3std5array__T8AppenderTAaZQn5clearMFNaNbNiNeZv@Base 12
+ _D3std5array__T8AppenderTAaZQn6__ctorMFNaNbNcNeQwZSQBxQBw__TQBtTQBnZQCb@Base 12
+ _D3std5array__T8AppenderTAaZQn6__initZ@Base 12
+ _D3std5array__T8AppenderTAaZQn7opSliceMNgFNaNbNdNiNeZANga@Base 12
+ _D3std5array__T8AppenderTAaZQn7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAaZQn8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAaZQn8shrinkToMFNaNemZv@Base 12
+ _D3std5array__T8AppenderTAaZQn__T3putTQnZQiMFNaNbNfQBaZv@Base 12
+ _D3std5array__T8AppenderTAaZQn__T3putTaZQhMFNaNbNfaZv@Base 12
+ _D3std5array__T8AppenderTAaZQn__T3putTwZQhMFNaNfwZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo4Data11__xopEqualsMxFKxSQCbQCa__TQBxTQBrZQCfQBsZb@Base 12
+ _D3std5array__T8AppenderTAxaZQo4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAxaZQo4Data9__xtoHashFNbNeKxSQCaQBz__TQBwTQBqZQCeQBrZm@Base 12
+ _D3std5array__T8AppenderTAxaZQo4dataMNgFNaNbNdNiNeZANgxa@Base 12
+ _D3std5array__T8AppenderTAxaZQo6__ctorMFNaNbNcNeQxZSQByQBx__TQBuTQBoZQCc@Base 12
+ _D3std5array__T8AppenderTAxaZQo6__initZ@Base 12
+ _D3std5array__T8AppenderTAxaZQo7opSliceMNgFNaNbNdNiNeZANgxa@Base 12
+ _D3std5array__T8AppenderTAxaZQo7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAxaZQo__T3putTAaZQiMFNaNbNfQnZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo__T3putTAyaZQjMFNaNbNfQoZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo__T3putTQoZQiMFNaNbNfQBbZv@Base 12
+ _D3std5array__T8AppenderTAxaZQo__T3putTwZQhMFNaNfwZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo4Data11__xopEqualsMxFKxSQCbQCa__TQBxTQBrZQCfQBsZb@Base 12
+ _D3std5array__T8AppenderTAyaZQo4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAyaZQo4Data9__xtoHashFNbNeKxSQCaQBz__TQBwTQBqZQCeQBrZm@Base 12
+ _D3std5array__T8AppenderTAyaZQo4dataMNgFNaNbNdNiNeZQBa@Base 12
+ _D3std5array__T8AppenderTAyaZQo6__ctorMFNaNbNcNeQxZSQByQBx__TQBuTQBoZQCc@Base 12
+ _D3std5array__T8AppenderTAyaZQo6__initZ@Base 12
+ _D3std5array__T8AppenderTAyaZQo7opSliceMNgFNaNbNdNiNeZQBd@Base 12
+ _D3std5array__T8AppenderTAyaZQo7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTAaZQiMFNaNbNfQnZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTAhZQiMFNaNbNfQnZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTAwZQiMFNaNfQlZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTAxaZQjMFNaNbNfQoZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTQoZQiMFNaNbNfQBbZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTSQBm4path__T16asNormalizedPathTSQCr5range__T5chainTSQDl3utf__T10byCodeUnitTQDlZQrFQDsZ14ByCodeUnitImplTSQFlQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtZQGuMFNaNbNfQHaZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTSQBm5range__T5chainTSQCg3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQEfQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFaMFNaNbNfQFgZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTSQBm5range__T5chainTSQCgQu__T4TakeTSQCvQBj__T6RepeatTaZQkZQBdTSQDw4conv__T7toCharsVii10TaVEQEy5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEwFQEtQDgZQtZQGcMFNaNbNfQGiZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTaZQhMFNaNbNfaZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTwZQhMFNaNfwZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTxaZQiMFNaNbNfxaZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTxwZQiMFNaNfxwZv@Base 12
+ _D3std5array__T8AppenderTAyaZQo__T3putTyaZQiMFNaNbNfyaZv@Base 12
+ _D3std5array__T8AppenderTAyuZQo13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAyuZQo4Data11__xopEqualsMxFKxSQCbQCa__TQBxTQBrZQCfQBsZb@Base 12
+ _D3std5array__T8AppenderTAyuZQo4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAyuZQo4Data9__xtoHashFNbNeKxSQCaQBz__TQBwTQBqZQCeQBrZm@Base 12
+ _D3std5array__T8AppenderTAyuZQo4dataMNgFNaNbNdNiNeZQBa@Base 12
+ _D3std5array__T8AppenderTAyuZQo6__ctorMFNaNbNcNeQxZSQByQBx__TQBuTQBoZQCc@Base 12
+ _D3std5array__T8AppenderTAyuZQo6__initZ@Base 12
+ _D3std5array__T8AppenderTAyuZQo7opSliceMNgFNaNbNdNiNeZQBd@Base 12
+ _D3std5array__T8AppenderTAyuZQo7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAyuZQo8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAyuZQo__T3putTAuZQiMFNaNbNfQnZv@Base 12
+ _D3std5array__T8AppenderTAyuZQo__T3putTQoZQiMFNaNbNfQBbZv@Base 12
+ _D3std5array__T8AppenderTAyuZQo__T3putTwZQhMFNaNfwZv@Base 12
+ _D3std5array__T8AppenderTAywZQo13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAywZQo4Data11__xopEqualsMxFKxSQCbQCa__TQBxTQBrZQCfQBsZb@Base 12
+ _D3std5array__T8AppenderTAywZQo4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTAywZQo4Data9__xtoHashFNbNeKxSQCaQBz__TQBwTQBqZQCeQBrZm@Base 12
+ _D3std5array__T8AppenderTAywZQo4dataMNgFNaNbNdNiNeZQBa@Base 12
+ _D3std5array__T8AppenderTAywZQo6__ctorMFNaNbNcNeQxZSQByQBx__TQBuTQBoZQCc@Base 12
+ _D3std5array__T8AppenderTAywZQo6__initZ@Base 12
+ _D3std5array__T8AppenderTAywZQo7opSliceMNgFNaNbNdNiNeZQBd@Base 12
+ _D3std5array__T8AppenderTAywZQo7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTAywZQo8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTAywZQo__T3putTQoZQiMFNaNbNfQBbZv@Base 12
+ _D3std5array__T8AppenderTAywZQo__T3putTwZQhMFNaNbNfwZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo13ensureAddableMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo4Data11__xopEqualsMxFKxSQCbQCa__TQBxTyQBrZQCgQBtZb@Base 12
+ _D3std5array__T8AppenderTyAaZQo4Data6__initZ@Base 12
+ _D3std5array__T8AppenderTyAaZQo4Data9__xtoHashFNbNeKxSQCaQBz__TQBwTyQBqZQCfQBsZm@Base 12
+ _D3std5array__T8AppenderTyAaZQo4dataMNgFNaNbNdNiNeZAya@Base 12
+ _D3std5array__T8AppenderTyAaZQo6__ctorMFNaNbNcNeyQxZSQBzQBy__TQBvTyQBpZQCe@Base 12
+ _D3std5array__T8AppenderTyAaZQo6__initZ@Base 12
+ _D3std5array__T8AppenderTyAaZQo7opSliceMNgFNaNbNdNiNeZAya@Base 12
+ _D3std5array__T8AppenderTyAaZQo7reserveMFNaNbNfmZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTAaZQiMFNaNbNfQnZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTAwZQiMFNaNfQlZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTAxaZQjMFNaNbNfQoZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTAyaZQjMFNaNbNfQoZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTaZQhMFNaNbNfaZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTwZQhMFNaNfwZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTxaZQiMFNaNbNfxaZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTxwZQiMFNaNfxwZv@Base 12
+ _D3std5array__T8AppenderTyAaZQo__T3putTyaZQiMFNaNbNfyaZv@Base 12
+ _D3std5array__T8appenderTAAyaZQpFNaNbNfZSQBnQBm__T8AppenderTQBjZQo@Base 12
+ _D3std5array__T8appenderTACQz3zip13ArchiveMemberZQBiFNaNbNfZSQChQCg__T8AppenderTQCdZQo@Base 12
+ _D3std5array__T8appenderTASQz6socket11AddressInfoZQBjFNaNbNfZSQCiQCh__T8AppenderTQCeZQo@Base 12
+ _D3std5array__T8appenderTAaZQnFNaNbNfZSQBlQBk__T8AppenderTQBhZQo@Base 12
+ _D3std5array__T8appenderTAxaZQoFNaNbNfZSQBmQBl__T8AppenderTQBiZQo@Base 12
+ _D3std5array__T8appenderTAyaZQoFNaNbNfZSQBmQBl__T8AppenderTQBiZQo@Base 12
+ _D3std5array__T8appenderTAyuZQoFNaNbNfZSQBmQBl__T8AppenderTQBiZQo@Base 12
+ _D3std5array__T8appenderTAywZQoFNaNbNfZSQBmQBl__T8AppenderTQBiZQo@Base 12
+ _D3std5array__T8appenderTyAaZQoFNaNbNfZSQBmQBl__T8AppenderTyQBiZQp@Base 12
+ _D3std5array__TQjTS6object__T7byValueHTHAyaCQBq3zip13ArchiveMemberTQBbTQBcZQBuFNaNbNiNfQBwZ6ResultZQDpFNaNbNfQDnZAQCt@Base 12
+ _D3std5array__TQjTSQr3uni__T13InversionListTSQBrQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQDaFNaNbNfQCyZASQDuQDe17CodepointInterval@Base 12
+ _D3std5array__TQjTSQr3uni__T13InversionListTSQBrQBb8GcPolicyZQBh__T9IntervalsTAxkZQpZQDbFNaNbNfQCzZASQDvQDf17CodepointInterval@Base 12
+ _D3std5array__TQjTSQr3uni__T8CowArrayTSQBlQv8GcPolicyZQBaZQCaFNaNbNfQByZAk@Base 12
+ _D3std5array__TQjTSQr3utf__T5byUTFTaVEQBk8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEn6string__T14rightJustifierTSQFsQFc__TQFbTwVQEyi1Z__TQFqTSQGvQGf__T10byCodeUnitTQFmZQrFQFtZ14ByCodeUnitImplZQHtFNcQCfZ6ResultZQElFQDymwZQsZQIyFNcQFsZQBfZQKiFNaNbNfQKgZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii10TaVEQBs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii10TaVEQBs5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii10TaVEQBs5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii10TaVEQBs5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii16TaVEQBs5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii16TaVEQBs5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii16TaVEQBs5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii16TaVEQBs5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6ResultZQDkFNaNbNfQDiZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii2TaVEQBr5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6ResultZQDjFNaNbNfQDhZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii2TaVEQBr5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6ResultZQDjFNaNbNfQDhZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii8TaVEQBr5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6ResultZQDjFNaNbNfQDhZAa@Base 12
+ _D3std5array__TQjTSQr4conv__T7toCharsVii8TaVEQBr5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6ResultZQDjFNaNbNfQDhZAa@Base 12
+ _D3std5array__TQjTSQr5range__T5chainTSQBk3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQDjQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFgFNaNbNfQFeZAxa@Base 12
+ _D3std5array__TQjTSQr9algorithm9iteration__T10UniqResultSQCd10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEv5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGsQBx18SortedRangeOptionsi0ZQCoZQGjZQHzFNaNbNfQHxZQCw@Base 12
+ _D3std5array__TQjTSQr9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCu8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6ResultZQGcFNaNbNfQGaZAQEv@Base 12
+ _D3std5ascii10isAlphaNumFNaNbNiNfwZb@Base 12
+ _D3std5ascii10isHexDigitFNaNbNiNfwZb@Base 12
+ _D3std5ascii10whitespaceyAa@Base 12
+ _D3std5ascii11__moduleRefZ@Base 12
+ _D3std5ascii11isGraphicalFNaNbNiNfwZb@Base 12
+ _D3std5ascii11isPrintableFNaNbNiNfwZb@Base 12
+ _D3std5ascii11octalDigitsyAa@Base 12
+ _D3std5ascii12__ModuleInfoZ@Base 12
+ _D3std5ascii12isOctalDigitFNaNbNiNfwZb@Base 12
+ _D3std5ascii13fullHexDigitsyAa@Base 12
+ _D3std5ascii13isPunctuationFNaNbNiNfwZb@Base 12
+ _D3std5ascii14lowerHexDigitsyAa@Base 12
+ _D3std5ascii6digitsyAa@Base 12
+ _D3std5ascii7isASCIIFNaNbNiNfwZb@Base 12
+ _D3std5ascii7isAlphaFNaNbNiNfwZb@Base 12
+ _D3std5ascii7isDigitFNaNbNiNfwZb@Base 12
+ _D3std5ascii7isLowerFNaNbNiNfwZb@Base 12
+ _D3std5ascii7isUpperFNaNbNiNfwZb@Base 12
+ _D3std5ascii7isWhiteFNaNbNiNfwZb@Base 12
+ _D3std5ascii7lettersyAa@Base 12
+ _D3std5ascii7newlineyAa@Base 12
+ _D3std5ascii9hexDigitsyAa@Base 12
+ _D3std5ascii9isControlFNaNbNiNfwZb@Base 12
+ _D3std5ascii9lowercaseyAa@Base 12
+ _D3std5ascii9uppercaseyAa@Base 12
+ _D3std5ascii__T7toLowerTwZQlFNaNbNiNfwZw@Base 12
+ _D3std5ascii__T7toLowerTxaZQmFNaNbNiNfxaZa@Base 12
+ _D3std5ascii__T7toLowerTxuZQmFNaNbNiNfxuZu@Base 12
+ _D3std5ascii__T7toLowerTxwZQmFNaNbNiNfxwZw@Base 12
+ _D3std5ascii__T7toLowerTyaZQmFNaNbNiNfyaZa@Base 12
+ _D3std5ascii__T7toLowerTywZQmFNaNbNiNfywZw@Base 12
+ _D3std5ascii__T7toUpperTwZQlFNaNbNiNfwZw@Base 12
+ _D3std5range10interfaces11__moduleRefZ@Base 12
+ _D3std5range10interfaces12__ModuleInfoZ@Base 12
+ _D3std5range10interfaces22UnsupportedRangeMethod6__initZ@Base 12
+ _D3std5range10interfaces22UnsupportedRangeMethod6__vtblZ@Base 12
+ _D3std5range10interfaces22UnsupportedRangeMethod7__ClassZ@Base 12
+ _D3std5range10interfaces22UnsupportedRangeMethod8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDuQDtQDqQDh@Base 12
+ _D3std5range10interfaces22UnsupportedRangeMethod8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDuQDtQDqQDh@Base 12
+ _D3std5range10primitives11__moduleRefZ@Base 12
+ _D3std5range10primitives12__ModuleInfoZ@Base 12
+ _D3std5range10primitives__T10walkLengthTAyaZQrFNaNbNiNfQpZm@Base 12
+ _D3std5range10primitives__T10walkLengthTSQBn4path__T16asNormalizedPathTSQCsQCr__T5chainTSQDj3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQFiQFh__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFlFNkMQEzZQtZQGyFNaNbNiNfQGxZm@Base 12
+ _D3std5range10primitives__T10walkLengthTSQBnQBm__T10roundRobinTSQCk9algorithm9iteration__T9MapResultSQDv10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQFz3uni21DecompressedIntervalsZQDuTSQHjQEzQEs__TQElSQHzQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6ResultZQJqFNaNfQJlZm@Base 12
+ _D3std5range10primitives__T10walkLengthTSQBnQBm__T4TakeTSQCd3utf__T5byUTFTwVEQCx8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQGaQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGuZQHvFNaNbNiNfQHuxmZm@Base 12
+ _D3std5range10primitives__T14popBackExactlyTAAyaZQwFNaNbNiNfKQrmZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTAC4core6thread5fiber5FiberZQBsFNaNbNiNfKQBomZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTSQBr3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCpFNaNbNiNfKQClmZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTSQBr3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCqFNaNbNiNfKQCmmZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTSQBr3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQCqFNaNbNiNfKQCmmZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTSQBr3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCqFNaNbNiNfKQCmmZv@Base 12
+ _D3std5range10primitives__T14popBackExactlyTSQBr3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImplZQCqFNaNbNiNfKQCmmZv@Base 12
+ _D3std5range10primitives__T15popFrontExactlyTAAyaZQxFNaNbNiNfKQrmZv@Base 12
+ _D3std5range10primitives__T15popFrontExactlyTAC4core6thread5fiber5FiberZQBtFNaNbNiNfKQBomZv@Base 12
+ _D3std5range10primitives__T3putTAkTkZQkFNaNbNiNfKQrkZv@Base 12
+ _D3std5range10primitives__T3putTDFMAxaZvTAaZQrFKQqQjZv@Base 12
+ _D3std5range10primitives__T3putTDFMAxaZvTAyaZQsFKQrQkZv@Base 12
+ _D3std5range10primitives__T3putTDFMAxaZvTQgZQrFKQqQpZv@Base 12
+ _D3std5range10primitives__T3putTDFNaNbNfAxaZvTAaZQwFNaNbNfKQBbQqZv@Base 12
+ _D3std5range10primitives__T3putTDFNaNbNfAxaZvTAyaZQxFNaNbNfKQBcQrZv@Base 12
+ _D3std5range10primitives__T3putTDFNaNbNfAxaZvTaZQvFNaNbNfKQBaaZv@Base 12
+ _D3std5range10primitives__T3putTDFNaNbNfAxaZvTxaZQwFNaNbNfKQBbxaZv@Base 12
+ _D3std5range10primitives__T3putTDFNaNbNfAxaZvTxwZQwFNaNfKQzxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAAyaZQpTQhZQBnFNaNbNfKQBtQyZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTAaZQBmFNaNbNfKQBsQrZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTAwZQBmFNaNfKQBqQpZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTAxaZQBnFNaNbNfKQBtQsZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTQhZQBmFNaNbNfKQBsQyZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTSQCj4path__T16asNormalizedPathTSQDoQDn__T5chainTSQEf3utf__T10byCodeUnitTQDbZQrFQDiZ14ByCodeUnitImplTSQGfQGe__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFmFNkMQFaZQtZQHvFNaNbNfKQIbQHaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTSQCjQCi__T5chainTSQDa3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQEzQEy__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQGbFNaNbNfKQGhQFgZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTSQCjQCi__T5chainTSQDaQCz__T4TakeTSQDqQDp__T6RepeatTaZQkZQBdTSQEr4conv__T7toCharsVii10TaVEQFt5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQExFQEuQDgZQtZQHeFNaNbNfKQHkQGjZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTaZQBlFNaNbNfKQBraZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTxaZQBmFNaNbNfKQBsxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTxwZQBmFNaNfKQBqxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTAyaZQoTyaZQBmFNaNbNfKQBsyaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTAaZQBmFNaNbNfKQBsQrZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTAwZQBmFNaNfKQBqQpZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTAxaZQBnFNaNbNfKQBtQsZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTAyaZQBnFNaNbNfKQBtQsZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTaZQBlFNaNbNfKQBraZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTxaZQBmFNaNbNfKQBsxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTxwZQBmFNaNfKQBqxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5array__T8AppenderTyAaZQoTyaZQBmFNaNbNfKQBsyaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTAaZQBrFNfKQBtQnZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTAwZQBrFNfKQBtQnZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTAxaZQBsFNfKQBuQoZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTAyaZQBsFNfKQBuQoZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTaZQBqFNfKQBsaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTxaZQBrFNfKQBtxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTxwZQBrFNfKQBtxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf5stdio4File17LockingTextWriterTyaZQBrFNfKQBtyaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format4spec__T10singleSpecTyaZQqFAyaZ16DummyOutputRangeTAxaZQCsFNaNbNiNfKQDaQuZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTAaZQBdFNaNbNiNfKQBlQtZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTAxaZQBeFNaNbNiNfKQBmQuZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTAyaZQBeFNaNbNiNfKQBmQuZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTaZQBcFNaNbNiNfKQBkaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTxaZQBdFNaNbNiNfKQBlxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format8NoOpSinkTxwZQBdFNaNbNiNfKQBlxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaZQCgFNaNbNfKQCmQsZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQmZQCfFNaNbNfKQClQBdZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQpZQCfFNaNbNfKQClQBgZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTaZQCeFNaNfKQCiaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTxaZQCfFNaNfKQCjxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTxwZQCfFNaNfKQCjxwZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTAyaZQCuFNaNbNfKQDaQsZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQrZQCtFNaNbNfKQCzQBiZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQuZQCtFNaNbNfKQCzQBlZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTaZQCsFNaNfKQCwaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTxaZQCtFNaNfKQCxxaZv@Base 12
+ _D3std5range10primitives__T3putTSQBf6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTxwZQCtFNaNfKQCxxwZv@Base 12
+ _D3std5range10primitives__T4backTAyaZQkFNaNbNcNdNiNfNkMANgAyaZNgQg@Base 12
+ _D3std5range10primitives__T4backTCQBg3zip13ArchiveMemberZQBeFNaNbNcNdNiNfNkMANgCQDaQBuQBtZNgQn@Base 12
+ _D3std5range10primitives__T4backTSQBg12experimental6logger11multilogger16MultiLoggerEntryZQClFNaNbNcNdNiNfNkMANgSQEhQDbQCqQCmQCcZNgQt@Base 12
+ _D3std5range10primitives__T4backTSQBg5regex8internal2ir10NamedGroupZQBpFNaNbNcNdNiNfNkMANgSQDlQCfQCcQBwQBwZNgQt@Base 12
+ _D3std5range10primitives__T4backTSQBg8datetime8timezone13PosixTimeZone10LeapSecondZQCeFNaNbNcNdNiNfNkMANgSQEaQCuQCoQCiQBwZNgQt@Base 12
+ _D3std5range10primitives__T4backTSQBg8datetime8timezone13PosixTimeZone10TransitionZQCeFNaNbNcNdNiNfNkMANgSQEaQCuQCoQCiQBwZNgQt@Base 12
+ _D3std5range10primitives__T4backTSQBg8datetime8timezone13PosixTimeZone14TempTransitionZQCiFNaNbNcNdNiNfNkMANgSQEeQCyQCsQCmQCaZNgQt@Base 12
+ _D3std5range10primitives__T4backTSQBg8internal14unicode_tables15UnicodePropertyZQCbFNaNbNcNdNiNfNkMANgSQDxQCrQClQByZNgQq@Base 12
+ _D3std5range10primitives__T4backTSQBg8internal14unicode_tables9CompEntryZQBuFNaNbNcNdNiNfNkMANgSQDqQCkQCeQBrZNgQq@Base 12
+ _D3std5range10primitives__T4backTaZQiFNaNdNfMAxaZw@Base 12
+ _D3std5range10primitives__T4backTkZQiFNaNbNcNdNiNfNkMANgkZNgk@Base 12
+ _D3std5range10primitives__T4saveTAxaZQkFNaNbNdNiNfNkMANgANgxaZQj@Base 12
+ _D3std5range10primitives__T4saveTAyaZQkFNaNbNdNiNfNkMANgAyaZQh@Base 12
+ _D3std5range10primitives__T4saveTCQBg3zip13ArchiveMemberZQBeFNaNbNdNiNfNkMANgCQCyQBsQBrZQo@Base 12
+ _D3std5range10primitives__T4saveTSQBg5regex8internal2ir10NamedGroupZQBpFNaNbNdNiNfNkMANgSQDjQCdQCaQBuQBuZQu@Base 12
+ _D3std5range10primitives__T4saveTSQBg8datetime8timezone13PosixTimeZone10LeapSecondZQCeFNaNbNdNiNfNkMANgSQDyQCsQCmQCgQBuZQu@Base 12
+ _D3std5range10primitives__T4saveTSQBg8datetime8timezone13PosixTimeZone10TransitionZQCeFNaNbNdNiNfNkMANgSQDyQCsQCmQCgQBuZQu@Base 12
+ _D3std5range10primitives__T4saveTSQBg8datetime8timezone13PosixTimeZone14TempTransitionZQCiFNaNbNdNiNfNkMANgSQEcQCwQCqQCkQByZQu@Base 12
+ _D3std5range10primitives__T4saveTSQBg8internal14unicode_tables15UnicodePropertyZQCbFNaNbNdNiNfNkMANgSQDvQCpQCjQBwZQr@Base 12
+ _D3std5range10primitives__T4saveTSQBg8internal14unicode_tables9CompEntryZQBuFNaNbNdNiNfNkMANgSQDoQCiQCcQBpZQr@Base 12
+ _D3std5range10primitives__T4saveTaZQiFNaNbNdNiNfNkMANgaZQf@Base 12
+ _D3std5range10primitives__T4saveTfZQiFNaNbNdNiNfNkMANgfZQf@Base 12
+ _D3std5range10primitives__T4saveThZQiFNaNbNdNiNfNkMANghZQf@Base 12
+ _D3std5range10primitives__T4saveTkZQiFNaNbNdNiNfNkMANgkZQf@Base 12
+ _D3std5range10primitives__T4saveTuZQiFNaNbNdNiNfNkMANguZQf@Base 12
+ _D3std5range10primitives__T5doPutTAkTkZQmFNaNbNiNfKQrKkZv@Base 12
+ _D3std5range10primitives__T5doPutTDFMAxaZvTAaZQtFKQqKQkZv@Base 12
+ _D3std5range10primitives__T5doPutTDFMAxaZvTAyaZQuFKQrKQlZv@Base 12
+ _D3std5range10primitives__T5doPutTDFMAxaZvTQgZQtFKQqKQqZv@Base 12
+ _D3std5range10primitives__T5doPutTDFNaNbNfAxaZvTAaZQyFNaNbNfKQBbKQrZv@Base 12
+ _D3std5range10primitives__T5doPutTDFNaNbNfAxaZvTAaZQyFNaNbNfKQBbQqZv@Base 12
+ _D3std5range10primitives__T5doPutTDFNaNbNfAxaZvTAyaZQzFNaNbNfKQBcKQsZv@Base 12
+ _D3std5range10primitives__T5doPutTDFNaNbNfAxaZvTQgZQyFNaNbNfKQBbQwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAAyaZQpTQhZQBpFNaNbNfKQBtKQzZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTAaZQBoFNaNbNfKQBsKQsZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTAwZQBoFNaNfKQBqKQqZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTAxaZQBpFNaNbNfKQBtKQtZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTQhZQBoFNaNbNfKQBsKQzZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTSQCl4path__T16asNormalizedPathTSQDqQDp__T5chainTSQEh3utf__T10byCodeUnitTQDbZQrFQDiZ14ByCodeUnitImplTSQGhQGg__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFmFNkMQFaZQtZQHxFNaNbNfKQIbKQHbZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTSQClQCk__T5chainTSQDc3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQFbQFa__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQGdFNaNbNfKQGhKQFhZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTSQClQCk__T5chainTSQDcQDb__T4TakeTSQDsQDr__T6RepeatTaZQkZQBdTSQEt4conv__T7toCharsVii10TaVEQFv5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQExFQEuQDgZQtZQHgFNaNbNfKQHkKQGkZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTaZQBnFNaNbNfKQBrKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTxaZQBoFNaNbNfKQBsKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTxwZQBoFNaNfKQBqKxwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTAyaZQoTyaZQBoFNaNbNfKQBsKyaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTAaZQBoFNaNbNfKQBsKQsZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTAwZQBoFNaNfKQBqKQqZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTAxaZQBpFNaNbNfKQBtKQtZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTAyaZQBpFNaNbNfKQBtKQtZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTaZQBnFNaNbNfKQBrKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTxaZQBoFNaNbNfKQBsKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTxwZQBoFNaNfKQBqKxwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5array__T8AppenderTyAaZQoTyaZQBoFNaNbNfKQBsKyaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTAaZQBtFNfKQBtKQoZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTAwZQBtFNfKQBtKQoZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTAxaZQBuFNfKQBuKQpZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTAyaZQBuFNfKQBuKQpZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTaZQBsFNfKQBsKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTxaZQBtFNfKQBtKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTxwZQBtFNfKQBtKxwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh5stdio4File17LockingTextWriterTyaZQBtFNfKQBtKyaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format4spec__T10singleSpecTyaZQqFAyaZ16DummyOutputRangeTAxaZQCuFNaNbNiNfKQDaKQvZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTAaZQBfFNaNbNiNfKQBlKQuZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTAxaZQBgFNaNbNiNfKQBmKQvZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTAyaZQBgFNaNbNiNfKQBmKQvZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTaZQBeFNaNbNiNfKQBkKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTxaZQBfFNaNbNiNfKQBlKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format8NoOpSinkTxwZQBfFNaNbNiNfKQBlKxwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaZQCiFNaNbNfKQCmKQtZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQmZQChFNaNbNfKQClKQBeZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQpZQChFNaNbNfKQClKQBhZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTaZQCgFNaNfKQCiKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTxaZQChFNaNfKQCjKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTxwZQChFNaNfKQCjKxwZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTAyaZQCwFNaNbNfKQDaKQtZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQrZQCvFNaNbNfKQCzKQBjZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQuZQCvFNaNbNfKQCzKQBmZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTaZQCuFNaNfKQCwKaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTxaZQCvFNaNfKQCxKxaZv@Base 12
+ _D3std5range10primitives__T5doPutTSQBh6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTxwZQCvFNaNfKQCxKxwZv@Base 12
+ _D3std5range10primitives__T5emptyTAAxaZQmFNaNbNdNiNfMKQuZb@Base 12
+ _D3std5range10primitives__T5emptyTAAyaZQmFNaNbNdNiNfMKQuZb@Base 12
+ _D3std5range10primitives__T5emptyTACQBi3zip13ArchiveMemberZQBgFNaNbNdNiNfMKQBpZb@Base 12
+ _D3std5range10primitives__T5emptyTAEQBi3uni__T16UnicodeSetParserTSQCm5regex8internal6parser__T6ParserTAyaTSQEbQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQEoFNaNbNdNiNfMKQExZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi3uni__T13InversionListTSQCjQBb8GcPolicyZQBhZQCeFNaNbNdNiNfMKQCnZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi5regex8internal2ir10NamedGroupZQBrFNaNbNdNiNfMKQCaZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi5regex8internal2ir__T5GroupTmZQjZQBtFNaNbNdNiNfMKQCcZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThreadZQCpFNaNbNdNiNfMKQCyZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi8datetime8timezone13PosixTimeZone10LeapSecondZQCgFNaNbNdNiNfMKQCpZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi8datetime8timezone13PosixTimeZone10TransitionZQCgFNaNbNdNiNfMKQCpZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi8datetime8timezone13PosixTimeZone14TempTransitionZQCkFNaNbNdNiNfMKQCtZb@Base 12
+ _D3std5range10primitives__T5emptyTASQBi8typecons__T5TupleTkTkTkZQnZQBoFNaNbNdNiNfMKQBxZb@Base 12
+ _D3std5range10primitives__T5emptyTAaZQkFNaNbNdNiNfMKQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAaZQkFNaNbNdNiNfMQrZb@Base 12
+ _D3std5range10primitives__T5emptyTAbZQkFNaNbNdNiNfMKQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAhZQkFNaNbNdNiNfMKQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAkZQkFNaNbNdNiNfMKQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAkZQkFNaNbNdNiNfMQrZb@Base 12
+ _D3std5range10primitives__T5emptyTAwZQkFNaNbNdNiNfMKQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAwZQkFNaNbNdNiNfMQrZb@Base 12
+ _D3std5range10primitives__T5emptyTAxAaZQmFNaNbNdNiNfMKQuZb@Base 12
+ _D3std5range10primitives__T5emptyTAxSQBj5regex8internal2ir8BytecodeZQBpFNaNbNdNiNfMKQByZb@Base 12
+ _D3std5range10primitives__T5emptyTAxaZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAxaZQlFNaNbNdNiNfMQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAxhZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAxkZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAxkZQlFNaNbNdNiNfMQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAxuZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAxwZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAyAaZQmFNaNbNdNiNfMKQuZb@Base 12
+ _D3std5range10primitives__T5emptyTAyAaZQmFNaNbNdNiNfMQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAySQBj8internal14unicode_tables15UnicodePropertyZQCeFNaNbNdNiNfMKQCnZb@Base 12
+ _D3std5range10primitives__T5emptyTAySQBj8internal14unicode_tables9CompEntryZQBxFNaNbNdNiNfMKQCgZb@Base 12
+ _D3std5range10primitives__T5emptyTAyaZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAyaZQlFNaNbNdNiNfMQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAyhZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTAyhZQlFNaNbNdNiNfMQsZb@Base 12
+ _D3std5range10primitives__T5emptyTAywZQlFNaNbNdNiNfMKQtZb@Base 12
+ _D3std5range10primitives__T5emptyTxASQBj4json9JSONValueZQBdFNaNbNdNiNfMKxQBmZb@Base 12
+ _D3std5range10primitives__T5emptyTxAaZQlFNaNbNdNiNfMKxQtZb@Base 12
+ _D3std5range10primitives__T5emptyTyASQBj8datetime8timezone13PosixTimeZone10LeapSecondZQChFNaNbNdNiNfMKyQCqZb@Base 12
+ _D3std5range10primitives__T5emptyTyASQBj8datetime8timezone13PosixTimeZone10TransitionZQChFNaNbNdNiNfMKyQCqZb@Base 12
+ _D3std5range10primitives__T5frontTAaZQkFNaNbNcNdNiNfNkMANgAaZNgQf@Base 12
+ _D3std5range10primitives__T5frontTAyaZQlFNaNbNcNdNiNfNkMANgAyaZNgQg@Base 12
+ _D3std5range10primitives__T5frontTCQBh3zip13ArchiveMemberZQBfFNaNbNcNdNiNfNkMANgCQDbQBuQBtZNgQn@Base 12
+ _D3std5range10primitives__T5frontTSQBh5regex8internal2ir10NamedGroupZQBqFNaNbNcNdNiNfNkMANgSQDmQCfQCcQBwQBwZNgQt@Base 12
+ _D3std5range10primitives__T5frontTSQBh8datetime8timezone13PosixTimeZone10LeapSecondZQCfFNaNbNcNdNiNfNkMANgSQEbQCuQCoQCiQBwZNgQt@Base 12
+ _D3std5range10primitives__T5frontTSQBh8datetime8timezone13PosixTimeZone10TransitionZQCfFNaNbNcNdNiNfNkMANgSQEbQCuQCoQCiQBwZNgQt@Base 12
+ _D3std5range10primitives__T5frontTSQBh8datetime8timezone13PosixTimeZone14TempTransitionZQCjFNaNbNcNdNiNfNkMANgSQEfQCyQCsQCmQCaZNgQt@Base 12
+ _D3std5range10primitives__T5frontTSQBh8internal14unicode_tables15UnicodePropertyZQCcFNaNbNcNdNiNfNkMANgSQDyQCrQClQByZNgQq@Base 12
+ _D3std5range10primitives__T5frontTSQBh8internal14unicode_tables9CompEntryZQBvFNaNbNcNdNiNfNkMANgSQDrQCkQCeQBrZNgQq@Base 12
+ _D3std5range10primitives__T5frontTaZQjFNaNdNfMAxaZw@Base 12
+ _D3std5range10primitives__T5frontThZQjFNaNbNcNdNiNfNkMANghZNgh@Base 12
+ _D3std5range10primitives__T5frontTkZQjFNaNbNcNdNiNfNkMANgkZNgk@Base 12
+ _D3std5range10primitives__T5frontTuZQjFNaNdNfMAxuZw@Base 12
+ _D3std5range10primitives__T5frontTwZQjFNaNbNcNdNiNfNkMANgwZNgw@Base 12
+ _D3std5range10primitives__T6moveAtTASQBj8datetime8timezone13PosixTimeZone10TransitionZQChFNaNbNiNfQClmZQCp@Base 12
+ _D3std5range10primitives__T6moveAtTSQBi3uni__T13InversionListTSQCjQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQCxFNaNbNiNfQDbmZSQEoQDg17CodepointInterval@Base 12
+ _D3std5range10primitives__T6moveAtTSQBi3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCgFNaNbNiNfQCkmZa@Base 12
+ _D3std5range10primitives__T6moveAtTSQBi3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQChFNaNbNiNfQClmZxa@Base 12
+ _D3std5range10primitives__T6moveAtTSQBi3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQChFNaNbNiNfQClmZya@Base 12
+ _D3std5range10primitives__T6moveAtTSQBi4conv__T7toCharsVii10TaVEQCk5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDhFNaNbNiNfQDlmZa@Base 12
+ _D3std5range10primitives__T6moveAtTSQBiQBh__T10OnlyResultTaZQpZQBkFNaNbNiNfQBomZa@Base 12
+ _D3std5range10primitives__T6moveAtTSQBiQBh__T6RepeatTaZQkZQBfFNaNbNiNfQBjmZa@Base 12
+ _D3std5range10primitives__T7popBackTAyaZQnFNaNbNiNfMKANgAyaZv@Base 12
+ _D3std5range10primitives__T7popBackTCQBj3zip13ArchiveMemberZQBhFNaNbNiNfMKANgCQCyQBpQBoZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj12experimental6logger11multilogger16MultiLoggerEntryZQCoFNaNbNiNfMKANgSQEfQCwQClQChQBxZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj4file15DirIteratorImpl9DirHandleZQBuFNaNbNiNfMKANgSQDlQCcQCaQBmZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj4file8DirEntryZQBcFNaNbNiNfMKANgSQCtQBkQBiZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj5regex8internal2ir10NamedGroupZQBsFNaNbNiNfMKANgSQDjQCaQBxQBrQBrZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj8datetime8timezone13PosixTimeZone10LeapSecondZQChFNaNbNiNfMKANgSQDyQCpQCjQCdQBrZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj8datetime8timezone13PosixTimeZone10TransitionZQChFNaNbNiNfMKANgSQDyQCpQCjQCdQBrZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj8datetime8timezone13PosixTimeZone14TempTransitionZQClFNaNbNiNfMKANgSQEcQCtQCnQChQBvZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj8internal14unicode_tables15UnicodePropertyZQCeFNaNbNiNfMKANgSQDvQCmQCgQBtZv@Base 12
+ _D3std5range10primitives__T7popBackTSQBj8internal14unicode_tables9CompEntryZQBxFNaNbNiNfMKANgSQDoQCfQBzQBmZv@Base 12
+ _D3std5range10primitives__T7popBackTaZQlFNaNfMKANgaZv@Base 12
+ _D3std5range10primitives__T7popBackTkZQlFNaNbNiNfMKANgkZv@Base 12
+ _D3std5range10primitives__T7putCharTDFNaNbNfAxaZvTxwZQBaFNaNfKQBaxwZv@Base 12
+ _D3std5range10primitives__T8moveBackTASQBl8datetime8timezone13PosixTimeZone10TransitionZQCjFNaNbNiNfQClZQCo@Base 12
+ _D3std5range10primitives__T8moveBackTSQBk3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCiFNaNbNiNfQCkZa@Base 12
+ _D3std5range10primitives__T8moveBackTSQBk3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfQClZxa@Base 12
+ _D3std5range10primitives__T8moveBackTSQBk3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfQClZya@Base 12
+ _D3std5range10primitives__T8moveBackTSQBk4conv__T7toCharsVii10TaVEQCm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDjFNaNbNiNfQDlZa@Base 12
+ _D3std5range10primitives__T8moveBackTSQBkQBj__T10OnlyResultTaZQpZQBmFNaNbNiNfQBoZa@Base 12
+ _D3std5range10primitives__T8popFrontTAaZQnFNaNbNiNfMKANgAaZv@Base 12
+ _D3std5range10primitives__T8popFrontTAyaZQoFNaNbNiNfMKANgAyaZv@Base 12
+ _D3std5range10primitives__T8popFrontTCQBk3zip13ArchiveMemberZQBiFNaNbNiNfMKANgCQCzQBpQBoZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk5regex8internal2ir10NamedGroupZQBtFNaNbNiNfMKANgSQDkQCaQBxQBrQBrZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk8datetime8timezone13PosixTimeZone10LeapSecondZQCiFNaNbNiNfMKANgSQDzQCpQCjQCdQBrZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk8datetime8timezone13PosixTimeZone10TransitionZQCiFNaNbNiNfMKANgSQDzQCpQCjQCdQBrZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk8datetime8timezone13PosixTimeZone14TempTransitionZQCmFNaNbNiNfMKANgSQEdQCtQCnQChQBvZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk8internal14unicode_tables15UnicodePropertyZQCfFNaNbNiNfMKANgSQDwQCmQCgQBtZv@Base 12
+ _D3std5range10primitives__T8popFrontTSQBk8internal14unicode_tables9CompEntryZQByFNaNbNiNfMKANgSQDpQCfQBzQBmZv@Base 12
+ _D3std5range10primitives__T8popFrontTaZQmFNaNbNeMKANgaZ12charWidthTabyAh@Base 12
+ _D3std5range10primitives__T8popFrontTaZQmFNaNbNiNeMKANgaZv@Base 12
+ _D3std5range10primitives__T8popFrontThZQmFNaNbNiNfMKANghZv@Base 12
+ _D3std5range10primitives__T8popFrontTkZQmFNaNbNiNfMKANgkZv@Base 12
+ _D3std5range10primitives__T8popFrontTuZQmFNaNbNiNeMKANguZv@Base 12
+ _D3std5range10primitives__T8popFrontTwZQmFNaNbNiNfMKANgwZv@Base 12
+ _D3std5range10primitives__T9moveFrontTASQBm8datetime8timezone13PosixTimeZone10TransitionZQCkFNaNbNiNfQClZQCo@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCjFNaNbNiNfQCkZa@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCkFNaNbNiNfQClZxa@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCkFNaNbNiNfQClZya@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl3utf__T5byUTFTwVEQCf8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFiQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGzFNaNbNiNfQHaZw@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl4conv__T7toCharsVii10TaVEQCn5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDkFNaNbNiNfQDlZa@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl9algorithm9iteration__T12FilterResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQExQEw__T4iotaTmTxmZQlFmxmZ6ResultZQDvZQFlFNaNbNiQFkZm@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBl9algorithm9iteration__T6joinerTSQCuQBjQBc__T9MapResultSQDr8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFoQEdQDw__T12FilterResultSQGpQCyQCsQCmMxFNbNdZ9__lambda1TSQHvQHu__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyZQIwFNaNbNiQIvZm@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBlQBk__T10OnlyResultTaZQpZQBnFNaNbNiNfQBoZa@Base 12
+ _D3std5range10primitives__T9moveFrontTSQBlQBk__T6RepeatTaZQkZQBiFNaNbNiNfQBjZa@Base 12
+ _D3std5range10primitives__T9popFrontNTAhZQoFNaNbNiNfKQpmZm@Base 12
+ _D3std5range10primitives__T9popFrontNTAxSQBn5regex8internal2ir8BytecodeZQBtFNaNbNiNfKQBvmZm@Base 12
+ _D3std5range10primitives__T9popFrontNTSQBl5regex8internal6parser__T6ParserTAyaTSQDaQBpQBmQBg7CodeGenZQBiZQDaFNaNfKQCymZm@Base 12
+ _D3std5range11__moduleRefZ@Base 12
+ _D3std5range12__ModuleInfoZ@Base 12
+ _D3std5range8NullSink6__initZ@Base 12
+ _D3std5range8nullSinkFNaNbNcNiNfZSQBgQBf8NullSink@Base 12
+ _D3std5range8nullSinkFNcZ4sinkSQBdQBc8NullSink@Base 12
+ _D3std5range__T10OnlyResultTaZQp10fetchFrontMFNaNbNiNeZa@Base 12
+ _D3std5range__T10OnlyResultTaZQp4backMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T10OnlyResultTaZQp4saveMFNaNbNdNiNfZSQBxQBw__TQBtTaZQBz@Base 12
+ _D3std5range__T10OnlyResultTaZQp5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std5range__T10OnlyResultTaZQp5frontMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T10OnlyResultTaZQp6__initZ@Base 12
+ _D3std5range__T10OnlyResultTaZQp6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T10OnlyResultTaZQp7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std5range__T10OnlyResultTaZQp7opSliceMFNaNbNiNfZSQByQBx__TQBuTaZQCa@Base 12
+ _D3std5range__T10OnlyResultTaZQp7opSliceMFNaNbNiNfmmZSQCaQBz__TQBwTaZQCc@Base 12
+ _D3std5range__T10OnlyResultTaZQp7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T10OnlyResultTaZQp8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T10OnlyResultTaZQp__T6__ctorZQiMFNaNbNcNiNfKaZSQChQCg__TQCdTaZQCj@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFNaNbNiNfQHzQDdZSQJhQJg__TQJdTQIuTQDzZQJpFQJgQEkZ6Result@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result11__xopEqualsMxFKxSQJyQJx__TQJuTQJlTQEqZQKgFQJxQFbZQCgZb@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result4saveMFNaNdNfZSQJuQJt__TQJqTQJhTQEmZQKcFQJtQExZQCc@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result5emptyMFNaNdNfZb@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result5frontMFNaNdNfZk@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result6__initZ@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result8popFrontMFNaNfZv@Base 12
+ _D3std5range__T10roundRobinTSQBb9algorithm9iteration__T9MapResultSQCm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEq3uni21DecompressedIntervalsZQDuTSQGaQEzQEs__TQElSQGqQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQIaFQHrQCvZ6Result9__xtoHashFNbNeKxSQJxQJw__TQJtTQJkTQEpZQKfFQJwQFaZQCfZm@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn11__xopEqualsMxFKxSQDwQDv__TQDsTQDiVQDla5_61203c2062VQDii0ZQExZb@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn4backMFNaNbNcNdNiNfZQCv@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn4saveMFNaNbNdNiNfZSQDwQDv__TQDsTQDiVQDla5_61203c2062VQDii0ZQEx@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn5frontMFNaNbNcNdNiNfZQCw@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn6__ctorMFNaNbNcNiNfQCvZSQEbQEa__TQDxTQDnVQDqa5_61203c2062VQDni0ZQFc@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn6__initZ@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn7opIndexMFNaNbNcNiNfmZQCx@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn7opSliceMFNaNbNiNjNfmmZSQEbQEa__TQDxTQDnVQDqa5_61203c2062VQDni0ZQFc@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn7releaseMFNaNbNiNjNfZQCx@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn9__xtoHashFNbNeKxSQDvQDu__TQDrTQDhVQDka5_61203c2062VQDhi0ZQEwZm@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg11__xopEqualsMxFKxSQFpQFo__TQFlTQFbSQGgQFdQEhQDyMFNaNfZQDvVQDoi0ZQGwZb@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg4backMFNaNbNcNdNiNfZQEo@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg4saveMFNaNbNdNiNfZSQFpQFo__TQFlTQFbSQGgQFdQEhQDyMFNaNfZQDvVQDoi0ZQGw@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg5frontMFNaNbNcNdNiNfZQEp@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg6__ctorMFNaNbNcNiNfQEoZSQFuQFt__TQFqTQFgSQGlQFiQEmQEdMFNaNfZQEaVQDti0ZQHb@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg6__initZ@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg7opIndexMFNaNbNcNiNfmZQEq@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg7opSliceMFNaNbNiNjNfmmZSQFuQFt__TQFqTQFgSQGlQFiQEmQEdMFNaNfZQEaVQDti0ZQHb@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg7releaseMFNaNbNiNjNfZQEq@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTACQBd3zip13ArchiveMemberSQCaQx10ZipArchive5buildMFNaNfZ9__lambda6VEQDqQDp18SortedRangeOptionsi0ZQEg9__xtoHashFNbNeKxSQFoQFn__TQFkTQFaSQGfQFcQEgQDxMFNaNfZQDuVQDni0ZQGvZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo11__xopEqualsMxFKxSQFxQFw__TQFtTQFjVQEda15_612e6e616d65203c20622e6e616d65VQEdi0ZQHtZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo4backMFNaNbNcNdNiNfZQEw@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo4saveMFNaNbNdNiNfZSQFxQFw__TQFtTQFjVQEda15_612e6e616d65203c20622e6e616d65VQEdi0ZQHt@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo5frontMFNaNbNcNdNiNfZQEx@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo6__ctorMFNaNbNcNiNfQEwZSQGcQGb__TQFyTQFoVQEia15_612e6e616d65203c20622e6e616d65VQEii0ZQHy@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo6__initZ@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo7opIndexMFNaNbNcNiNfmZQEy@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo7opSliceMFNaNbNiNjNfmmZSQGcQGb__TQFyTQFoVQEia15_612e6e616d65203c20622e6e616d65VQEii0ZQHy@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo7releaseMFNaNbNiNjNfZQEy@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo9__xtoHashFNbNeKxSQFwQFv__TQFsTQFiVQEca15_612e6e616d65203c20622e6e616d65VQEci0ZQHsZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo__T10lowerBoundVEQFvQFu12SearchPolicyi3TQFqZQBpMFNaNbNiNfQGhZSQHoQHn__TQHkTQHaVQFua15_612e6e616d65203c20622e6e616d65VQFui0ZQJk@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo__T18getTransitionIndexVEQGdQGc12SearchPolicyi3SQHaQGz__TQGwTQGmVQFga15_612e6e616d65203c20622e6e616d65VQFgi0ZQIw3geqTQIpZQEoMFNaNbNiNfQJgZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd5regex8internal2ir10NamedGroupVAyaa15_612e6e616d65203c20622e6e616d65VEQDyQDx18SortedRangeOptionsi0ZQEo__T3geqTQEkTQEoZQnMFNaNbNiNfQFeQFhZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh11__xopEqualsMxFKxSQGqQGp__TQGmTQGcVQEha17_612e74696d6554203c20622e74696d6554VQEhi0ZQIqZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh4backMFNaNbNcNdNiNfZQFp@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh4saveMFNaNbNdNiNfZSQGqQGp__TQGmTQGcVQEha17_612e74696d6554203c20622e74696d6554VQEhi0ZQIq@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh5frontMFNaNbNcNdNiNfZQFq@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh6__ctorMFNaNbNcNiNfQFpZSQGvQGu__TQGrTQGhVQEma17_612e74696d6554203c20622e74696d6554VQEmi0ZQIv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh6__initZ@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh7opIndexMFNaNbNcNiNfmZQFr@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh7opSliceMFNaNbNiNjNfmmZSQGvQGu__TQGrTQGhVQEma17_612e74696d6554203c20622e74696d6554VQEmi0ZQIv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh7releaseMFNaNbNiNjNfZQFr@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone10LeapSecondVAyaa17_612e74696d6554203c20622e74696d6554VEQErQEq18SortedRangeOptionsi0ZQFh9__xtoHashFNbNeKxSQGpQGo__TQGlTQGbVQEga17_612e74696d6554203c20622e74696d6554VQEgi0ZQIpZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl11__xopEqualsMxFKxSQGuQGt__TQGqTQGgVQEha17_612e74696d6554203c20622e74696d6554VQEhi0ZQIuZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl4backMFNaNbNcNdNiNfZQFt@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl4saveMFNaNbNdNiNfZSQGuQGt__TQGqTQGgVQEha17_612e74696d6554203c20622e74696d6554VQEhi0ZQIu@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl5frontMFNaNbNcNdNiNfZQFu@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl6__ctorMFNaNbNcNiNfQFtZSQGzQGy__TQGvTQGlVQEma17_612e74696d6554203c20622e74696d6554VQEmi0ZQIz@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl6__initZ@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl7opIndexMFNaNbNcNiNfmZQFv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl7opSliceMFNaNbNiNjNfmmZSQGzQGy__TQGvTQGlVQEma17_612e74696d6554203c20622e74696d6554VQEmi0ZQIz@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl7releaseMFNaNbNiNjNfZQFv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTASQBd8datetime8timezone13PosixTimeZone14TempTransitionVAyaa17_612e74696d6554203c20622e74696d6554VEQEvQEu18SortedRangeOptionsi0ZQFl9__xtoHashFNbNeKxSQGtQGs__TQGpTQGfVQEga17_612e74696d6554203c20622e74696d6554VQEgi0ZQItZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm11__xopEqualsMxFKxSQDvQDu__TQDrTQDhVQDia5_61203c2062VQDii0ZQEwZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm4backMFNaNbNcNdNiNfZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm4saveMFNaNbNdNiNfZSQDvQDu__TQDrTQDhVQDia5_61203c2062VQDii0ZQEw@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm5frontMFNaNbNcNdNiNfZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm6__ctorMFNaNbNcNiNfQCuZSQEaQDz__TQDwTQDmVQDna5_61203c2062VQDni0ZQFb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm6__initZ@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm7opIndexMFNaNbNcNiNfmZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm7opSliceMFNaNbNiNjNfmmZSQEaQDz__TQDwTQDmVQDna5_61203c2062VQDni0ZQFb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm7releaseMFNaNbNiNjNfZQCw@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm9__xtoHashFNbNeKxSQDuQDt__TQDqTQDgVQDha5_61203c2062VQDhi0ZQEvZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm__T10lowerBoundVEQDtQDs12SearchPolicyi2TiZQBnMFNaNbNiNfiZSQFiQFh__TQFeTQEuVQEva5_61203c2062VQEvi0ZQGj@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm__T18getTransitionIndexVEQEbQEa12SearchPolicyi2SQEyQEx__TQEuTQEkVQEla5_61203c2062VQEli0ZQFz3geqTiZQDrMFNaNbNiNfiZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm__T18getTransitionIndexVEQEbQEa12SearchPolicyi3SQEyQEx__TQEuTQEkVQEla5_61203c2062VQEli0ZQFz3geqTiZQDrMFNaNbNiNfiZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm__T3geqTkTiZQjMFNaNbNiNfkiZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo11__xopEqualsMxFKxSQDxQDw__TQDtTQDjVQDka6_61203c3d2062VQDki0ZQFaZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo4backMFNaNbNcNdNiNfZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo4saveMFNaNbNdNiNfZSQDxQDw__TQDtTQDjVQDka6_61203c3d2062VQDki0ZQFa@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo5frontMFNaNbNcNdNiNfZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo6__ctorMFNaNbNcNiNfQCwZSQEcQEb__TQDyTQDoVQDpa6_61203c3d2062VQDpi0ZQFf@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo6__initZ@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo7opIndexMFNaNbNcNiNfmZk@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo7opSliceMFNaNbNiNjNfmmZSQEcQEb__TQDyTQDoVQDpa6_61203c3d2062VQDpi0ZQFf@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo7releaseMFNaNbNiNjNfZQCy@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo9__xtoHashFNbNeKxSQDwQDv__TQDsTQDiVQDja6_61203c3d2062VQDji0ZQEzZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T10lowerBoundVEQDvQDu12SearchPolicyi3TkZQBnMFNaNbNiNfkZSQFkQFj__TQFgTQEwVQExa6_61203c3d2062VQExi0ZQGn@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T10lowerBoundVEQDvQDu12SearchPolicyi3TyiZQBoMFNaNbNiNfyiZSQFmQFl__TQFiTQEyVQEza6_61203c3d2062VQEzi0ZQGp@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T18getTransitionIndexVEQEdQEc12SearchPolicyi3SQFaQEz__TQEwTQEmVQEna6_61203c3d2062VQEni0ZQGd3geqTkZQDtMFNaNbNiNfkZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T18getTransitionIndexVEQEdQEc12SearchPolicyi3SQFaQEz__TQEwTQEmVQEna6_61203c3d2062VQEni0ZQGd3geqTyiZQDuMFNaNbNiNfyiZm@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T3geqTkTkZQjMFNaNbNiNfkkZb@Base 12
+ _D3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo__T3geqTkTyiZQkMFNaNbNiNfkyiZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa11__xopEqualsMxFKxSQHjQHi__TQHfTQGvSQIaQGy__TQGxTQGlZQHfQEkMFNfZQEeVQDxi0ZQIzZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa4backMFNaNbNdNiNfZSQHjQGh17CodepointInterval@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa4saveMFNaNbNdNiNfZSQHjQHi__TQHfTQGvSQIaQGy__TQGxTQGlZQHfQEkMFNfZQEeVQDxi0ZQIz@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa5frontMFNaNbNdNiNfZSQHkQGi17CodepointInterval@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa6__ctorMFNaNbNcNiNfQGiZSQHoQHn__TQHkTQHaSQIfQHd__TQHcTQGqZQHkQEpMFNfZQEjVQEci0ZQJe@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa6__initZ@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa7opIndexMFNaNbNiNfmZSQHlQGj17CodepointInterval@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa7opSliceMFNaNbNiNjNfmmZSQHoQHn__TQHkTQHaSQIfQHd__TQHcTQGqZQHkQEpMFNfZQEjVQEci0ZQJe@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa7releaseMFNaNbNiNjNfZQGk@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc3uni__T13InversionListTSQCdQBb8GcPolicyZQBh__T9IntervalsTAkZQoSQDqQCo__TQCnTQCbZQCv8sanitizeMFNfZ9__lambda2VEQFkQFj18SortedRangeOptionsi0ZQGa9__xtoHashFNbNeKxSQHiQHh__TQHeTQGuSQHzQGx__TQGwTQGkZQHeQEjMFNfZQEdVQDwi0ZQIyZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu11__xopEqualsMxFKxSQJdQJc__TQIzTQIpVQGfa5_61203c2062VQDii0ZQKeZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu4backMFNaNbNdNiNfZyw@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu4saveMFNaNbNdNiNfZSQJdQJc__TQIzTQIpVQGfa5_61203c2062VQDii0ZQKe@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu5frontMFNaNbNdNiNfZyw@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu6__ctorMFNaNbNcNiNfQIcZSQJiQJh__TQJeTQIuVQGka5_61203c2062VQDni0ZQKj@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu6__initZ@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu7opIndexMFNaNbNiNfmZyw@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu7opSliceMFNaNbNiNjNfmmZSQJiQJh__TQJeTQIuVQGka5_61203c2062VQDni0ZQKj@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu7releaseMFNaNbNiNjNfZQIe@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu9__xtoHashFNbNeKxSQJcQJb__TQIyTQIoVQGea5_61203c2062VQDhi0ZQKdZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu__T10lowerBoundVEQJbQJa12SearchPolicyi3TwZQBnMFNaNbNiNfwZSQKqQKp__TQKmTQKcVQHsa5_61203c2062VQEvi0ZQLr@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu__T18getTransitionIndexVEQJjQJi12SearchPolicyi3SQKgQKf__TQKcTQJsVQHia5_61203c2062VQEli0ZQLh3geqTwZQDrMFNaNbNiNfwZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEv8internal14unicode_tables9CompEntryZQEgVQCxa5_61203c2062VEQHeQHd18SortedRangeOptionsi0ZQHu__T3geqTywTwZQkMFNaNbNiNfywwZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr11__xopEqualsMxFKxSQMaQLz__TQLwTQLmSQMrQFw__TQFvS_DQNgQIjQIdQFiQFeQFdTaZQGwFMxQEvZQEwVQEpi0ZQOiZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr4backMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr4saveMFNaNbNdNiNfZSQMaQLz__TQLwTQLmSQMrQFw__TQFvS_DQNgQIjQIdQFiQFeQFdTaZQGwFMxQEvZQEwVQEpi0ZQOi@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr5frontMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr6__ctorMFNaNbNcNiNfQKzZSQMfQMe__TQMbTQLrSQMwQGb__TQGaS_DQNlQIoQIiQFnQFjQFiTaZQHbFMxQFaZQFbVQEui0ZQOn@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr6__initZ@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr7opIndexMFNaNbNiNfmZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr7opSliceMFNaNbNiNjNfmmZSQMfQMe__TQMbTQLrSQMwQGb__TQGaS_DQNlQIoQIiQFnQFjQFiTaZQHbFMxQFaZQFbVQEui0ZQOn@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr7releaseMFNaNbNiNjNfZQLb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr9__xtoHashFNbNeKxSQLzQLy__TQLvTQLlSQMqQFv__TQFuS_DQNfQIiQIcQFhQFdQFcTaZQGvFMxQEuZQEvVQEoi0ZQOhZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr__T10lowerBoundVEQLyQLx12SearchPolicyi3TAxaZQBpMFNaNfQnZSQNmQNl__TQNiTQMySQOdQHi__TQHhS_DQOsQJvQJpQGuQGqQGpTaZQIiFMxQGhZQGiVQGbi0ZQPu@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr__T18getTransitionIndexVEQMgQMf12SearchPolicyi3SQNdQNc__TQMzTQMpSQNuQGz__TQGyS_DQOjQJmQJgQGlQGhQGgTaZQHzFMxQFyZQFzVQFsi0ZQPl3geqTAxaZQFaMFNaNfQnZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv6blocks3tabFNaNdNfZQEgTaZQCbFMxAaZ9__lambda2VEQKbQKa18SortedRangeOptionsi0ZQKr__T3geqTQIaTAxaZQnMFNaNfQIqQpZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw11__xopEqualsMxFKxSQMfQMe__TQMbTQLrSQMwQGb__TQGaS_DQNlQIoQIiQFnQFiQFhTaZQHbFMxQEvZQEwVQEpi0ZQOnZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw4backMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw4saveMFNaNbNdNiNfZSQMfQMe__TQMbTQLrSQMwQGb__TQGaS_DQNlQIoQIiQFnQFiQFhTaZQHbFMxQEvZQEwVQEpi0ZQOn@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw5frontMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw6__ctorMFNaNbNcNiNfQLeZSQMkQMj__TQMgTQLwSQNbQGg__TQGfS_DQNqQItQInQFsQFnQFmTaZQHgFMxQFaZQFbVQEui0ZQOs@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw6__initZ@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw7opIndexMFNaNbNiNfmZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw7opSliceMFNaNbNiNjNfmmZSQMkQMj__TQMgTQLwSQNbQGg__TQGfS_DQNqQItQInQFsQFnQFmTaZQHgFMxQFaZQFbVQEui0ZQOs@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw7releaseMFNaNbNiNjNfZQLg@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw9__xtoHashFNbNeKxSQMeQMd__TQMaTQLqSQMvQGa__TQFzS_DQNkQInQIhQFmQFhQFgTaZQHaFMxQEuZQEvVQEoi0ZQOmZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw__T10lowerBoundVEQMdQMc12SearchPolicyi3TAxaZQBpMFNaNfQnZSQNrQNq__TQNnTQNdSQOiQHn__TQHmS_DQOxQKaQJuQGzQGuQGtTaZQInFMxQGhZQGiVQGbi0ZQPz@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw__T18getTransitionIndexVEQMlQMk12SearchPolicyi3SQNiQNh__TQNeTQMuSQNzQHe__TQHdS_DQOoQJrQJlQGqQGlQGkTaZQIeFMxQFyZQFzVQFsi0ZQPq3geqTAxaZQFaMFNaNfQnZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv7scripts3tabFNaNbNdNiNfZQElTaZQCgFMxAaZ9__lambda2VEQKgQKf18SortedRangeOptionsi0ZQKw__T3geqTQIfTAxaZQnMFNaNfQIvQpZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt11__xopEqualsMxFKxSQMcQMb__TQLyTQLoSQMtQFy__TQFxS_DQNiQIlQIfQFkQFeQFdTaZQGyFMxQEvZQEwVQEpi0ZQOkZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt4backMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt4saveMFNaNbNdNiNfZSQMcQMb__TQLyTQLoSQMtQFy__TQFxS_DQNiQIlQIfQFkQFeQFdTaZQGyFMxQEvZQEwVQEpi0ZQOk@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt5frontMFNaNbNdNiNfZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt6__ctorMFNaNbNcNiNfQLbZSQMhQMg__TQMdTQLtSQMyQGd__TQGcS_DQNnQIqQIkQFpQFjQFiTaZQHdFMxQFaZQFbVQEui0ZQOp@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt6__initZ@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt7opIndexMFNaNbNiNfmZyAa@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt7opSliceMFNaNbNiNjNfmmZSQMhQMg__TQMdTQLtSQMyQGd__TQGcS_DQNnQIqQIkQFpQFjQFiTaZQHdFMxQFaZQFbVQEui0ZQOp@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt7releaseMFNaNbNiNjNfZQLd@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt9__mixin246lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt9__xtoHashFNbNeKxSQMbQMa__TQLxTQLnSQMsQFx__TQFwS_DQNhQIkQIeQFjQFdQFcTaZQGxFMxQEuZQEvVQEoi0ZQOjZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt__T10lowerBoundVEQMaQLz12SearchPolicyi3TAxaZQBpMFNaNfQnZSQNoQNn__TQNkTQNaSQOfQHk__TQHjS_DQOuQJxQJrQGwQGqQGpTaZQIkFMxQGhZQGiVQGbi0ZQPw@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt__T18getTransitionIndexVEQMiQMh12SearchPolicyi3SQNfQNe__TQNbTQMrSQNwQHb__TQHaS_DQOlQJoQJiQGnQGhQGgTaZQIbFMxQFyZQFzVQFsi0ZQPn3geqTAxaZQFaMFNaNfQnZm@Base 12
+ _D3std5range__T11SortedRangeTSQBc9algorithm9iteration__T9MapResultSQCn10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEx8internal14unicode_tables15UnicodePropertyZQEpSQGv3uni__T14findUnicodeSetS_DQHyQDbQCv8uniProps3tabFNaNdNfZQEiTaZQCdFMxAaZ9__lambda2VEQKdQKc18SortedRangeOptionsi0ZQKt__T3geqTQIcTAxaZQnMFNaNfQIsQpZb@Base 12
+ _D3std5range__T11takeExactlyTSQBcQBb__T6RepeatTaZQkZQBlFNaNbNiNfQBjmZSQCqQCp__T4TakeTQCeZQk@Base 12
+ _D3std5range__T12assumeSortedVAyaa15_612e6e616d65203c20622e6e616d65TASQCq5regex8internal2ir10NamedGroupZQDlFNaNbNiNfQBwZSQEpQEo__T11SortedRangeTQCyVQEoa15_612e6e616d65203c20622e6e616d65VEQHdQHc18SortedRangeOptionsi0ZQDi@Base 12
+ _D3std5range__T12assumeSortedVAyaa17_612e74696d6554203c20622e74696d6554TASQCu8datetime8timezone13PosixTimeZone10LeapSecondZQEeFNaNbNiNfQClZSQFiQFh__T11SortedRangeTQDnVQFha17_612e74696d6554203c20622e74696d6554VEQIaQHz18SortedRangeOptionsi0ZQDm@Base 12
+ _D3std5range__T12assumeSortedVAyaa17_612e74696d6554203c20622e74696d6554TASQCu8datetime8timezone13PosixTimeZone14TempTransitionZQEiFNaNbNiNfQCpZSQFmQFl__T11SortedRangeTQDrVQFla17_612e74696d6554203c20622e74696d6554VEQIeQId18SortedRangeOptionsi0ZQDm@Base 12
+ _D3std5range__T12assumeSortedVAyaa5_61203c2062TAQsZQBkFNaNbNiNfQqZSQCnQCm__T11SortedRangeTQBrVQCma5_61203c2062VEQEgQEf18SortedRangeOptionsi0ZQCn@Base 12
+ _D3std5range__T12assumeSortedVAyaa5_61203c2062TAkZQBjFNaNbNiNfQpZSQCmQCl__T11SortedRangeTQBqVQCla5_61203c2062VEQEfQEe18SortedRangeOptionsi0ZQCn@Base 12
+ _D3std5range__T12assumeSortedVAyaa5_61203c2062TSQBu9algorithm9iteration__T9MapResultSQDf10functional__T8unaryFunVQDfa5_612e726873VQDwa1_61ZQBkTAySQFo8internal14unicode_tables9CompEntryZQEhZQGsFNaNbNiNfQFyZSQHwQHv__T11SortedRangeTQHaVQHva5_61203c2062VEQJpQJo18SortedRangeOptionsi0ZQCn@Base 12
+ _D3std5range__T12assumeSortedVAyaa6_61203c3d2062TAkZQBlFNaNbNiNfQpZSQCoQCn__T11SortedRangeTQBqVQCna6_61203c3d2062VEQEjQEi18SortedRangeOptionsi0ZQCp@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt11__xopEqualsMxFKxSQIcQIb__TQHyTQHwZQIgZb@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt4saveMFNaNbNdNiNfZSQIcQIb__TQHyTQHwZQIg@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt5frontMFNaNbNdNiNfZw@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt6__initZ@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt9__xtoHashFNbNeKxSQIbQIa__TQHxTQHvZQIfZm@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt9maxLengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T4TakeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGt9moveFrontMFNaNbNiNfZw@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb4backMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb4saveMFNaNbNdNiNfZSQCkQCj__TQCgTQCeZQCo@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb5frontMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb6__initZ@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb6moveAtMFNaNbNiNfmZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb8moveBackMFNaNbNiNfZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb9maxLengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb9moveFrontMFNaNbNiNfZa@Base 12
+ _D3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb__T7opSliceZQjMFNaNbNiNfmmZSQCtQCs__TQCpTQCnZQCx@Base 12
+ _D3std5range__T4dropTSQu5regex8internal6parser__T6ParserTAyaTSQCiQBpQBmQBg7CodeGenZQBiZQCuFNaNfQCwmZQDb@Base 12
+ _D3std5range__T4iotaTmTmZQkFNaNbNiNfmmZSQBmQBl__TQBiTmTmZQBqFmmZ6Result@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result4backMNgFNaNbNdNiNfZNgm@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result4saveMFNaNbNdNiNfZSQCdQCc__TQBzTmTmZQChFmmZQBy@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result5frontMNgFNaNbNdNiNfZNgm@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result6__ctorMFNaNbNcNiNfmmZSQChQCg__TQCdTmTmZQClFmmZQCc@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result6__initZ@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result7opIndexMNgFNaNbNiNfmZNgm@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result7opSliceMNgFNaNbNiNfZNgSQCiQCh__TQCeTmTmZQCmFmmZQCd@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result7opSliceMNgFNaNbNiNfmmZNgSQCkQCj__TQCgTmTmZQCoFmmZQCf@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result8containsMFNaNbNiNfmZb@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T4iotaTmTmZQkFmmZ6Result__T13opBinaryRightVAyaa2_696eZQBbMxFNaNbNiNfmZb@Base 12
+ _D3std5range__T4iotaTmTxmZQlFNaNbNiNfmxmZSQBoQBn__TQBkTmTxmZQBtFmxmZ6Result@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result4backMNgFNaNbNdNiNfZNgm@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result4saveMFNaNbNdNiNfZSQCfQCe__TQCbTmTxmZQCkFmxmZQCa@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result5frontMNgFNaNbNdNiNfZNgm@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result6__ctorMFNaNbNcNiNfmmZSQCjQCi__TQCfTmTxmZQCoFmxmZQCe@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result6__initZ@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result7opIndexMNgFNaNbNiNfmZNgm@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result7opSliceMNgFNaNbNiNfZNgSQCkQCj__TQCgTmTxmZQCpFmxmZQCf@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result7opSliceMNgFNaNbNiNfmmZNgSQCmQCl__TQCiTmTxmZQCrFmxmZQCh@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result8containsMFNaNbNiNfmZb@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T4iotaTmTxmZQlFmxmZ6Result__T13opBinaryRightVAyaa2_696eZQBbMxFNaNbNiNfmZb@Base 12
+ _D3std5range__T4iotaTmZQiFNaNbNiNfmZSQBjQBi__TQBfTmTmZQBnFmmZ6Result@Base 12
+ _D3std5range__T4onlyTaZQiFNaNbNiNfaZSQBjQBi__T10OnlyResultTaZQp@Base 12
+ _D3std5range__T4takeTAhZQjFNaNbNiNfQomZQs@Base 12
+ _D3std5range__T4takeTSQu3utf__T5byUTFTwVEQBn8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQEqQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQGtFNaNbNiNfQGzmZSQHyQHx__T4TakeTQHuZQk@Base 12
+ _D3std5range__T4takeTSQuQs__T6RepeatTaZQkZQBbFNaNbNiNfQBhmZSQCgQCf__T4TakeTQCcZQk@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFNaNbNiNfQEoQCuQBvZSQFtQFs__TQFpTQFmTQDtTQCvZQGfFQGcQEiQDjZ6Result@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result11__xopEqualsMxFKxSQGkQGj__TQGgTQGdTQEkTQDmZQGwFQGtQEzQEaZQCnZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result4backMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result4saveMFNaNbNdNiNfZSQGkQGj__TQGgTQGdTQEkTQDmZQGwFQGtQEzQEaZQCn@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result5frontMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6__ctorMFNaNbNcNiNfQFqQDwQCxZSQGvQGu__TQGrTQGoTQEvTQDxZQHhFQHeQFkQElZQCy@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6__initZ@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6fixRefFNaNbNiNfxaZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result6moveAtMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result7opIndexMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result7opSliceMFNaNbNiNjNfmmZSQGpQGo__TQGlTQGiTQEpTQDrZQHbFQGyQFeQEfZQCs@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result8moveBackMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result9__xtoHashFNbNeKxSQGjQGi__TQGfTQGcTQEjTQDlZQGvFQGsQEyQDzZQCmZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCsQCr__T10OnlyResultTaZQpTSQDuQDa__TQCzTAxaZQDhFQiZQCsZQEjFQEgQCmQBnZ6Result9moveFrontMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFNaNbNiNfQDqQBvQDwZSQEvQEu__TQErTQEoTQCuTQEwZQFhFQFeQDjQFkZ6Result@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result11__xopEqualsMxFKxSQFmQFl__TQFiTQFfTQDlTQFnZQFyFQFvQEaQGbZQCnZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result4backMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result4saveMFNaNbNdNiNfZSQFmQFl__TQFiTQFfTQDlTQFnZQFyFQFvQEaQGbZQCn@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result5frontMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__ctorMFNaNbNcNiNfQEsQCxQEyZSQFxQFw__TQFtTQFqTQDwTQFyZQGjFQGgQElQGmZQCy@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__initZ@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6fixRefFNaNbNiNfxaZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6moveAtMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7opIndexMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7opSliceMFNaNbNiNjNfmmZSQFrQFq__TQFnTQFkTQDqTQFsZQGdFQGaQEfQGgZQCs@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result8moveBackMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result9__xtoHashFNbNeKxSQFlQFk__TQFhTQFeTQDkTQFmZQFxFQFuQDzQGaZQCmZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result9moveFrontMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFNaNbNiNfQDqQBvQDwZSQEvQEu__TQErTQEoTQCuTQEwZQFhFQFeQDjQFkZ6Result@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result11__xopEqualsMxFKxSQFmQFl__TQFiTQFfTQDlTQFnZQFyFQFvQEaQGbZQCnZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result4backMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result4saveMFNaNbNdNiNfZSQFmQFl__TQFiTQFfTQDlTQFnZQFyFQFvQEaQGbZQCn@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result5frontMFNaNbNdNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__ctorMFNaNbNcNiNfQEsQCxQEyZSQFxQFw__TQFtTQFqTQDwTQFyZQGjFQGgQElQGmZQCy@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6__initZ@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6fixRefFNaNbNiNfxaZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result6moveAtMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7opIndexMFNaNbNiNfmZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7opSliceMFNaNbNiNjNfmmZSQFrQFq__TQFnTQFkTQDqTQFsZQGdFQGaQEfQGgZQCs@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result8moveBackMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result9__xtoHashFNbNeKxSQFlQFk__TQFhTQFeTQDkTQFmZQFxFQFuQDzQGaZQCmZm@Base 12
+ _D3std5range__T5chainTSQv3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCtQCs__T10OnlyResultTaZQpTQDaZQDlFQDiQBnQDoZ6Result9moveFrontMFNaNbNiNfZxa@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFNaNbNiNfQMbQDuZSQNdQNc__TQMzTQMwTQEqZQNlFQNiQFbZQGg@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr4saveMFNaNbNdNiNfZSQNqQNp__TQNmTQNjTQFdZQNyFQNvQFoZQGt@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr5emptyMFNaNbNdNiZb@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr5frontMFNaNbNdNiZm@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr6__ctorMFNaNbNcNiNfQMwQEpZSQNyQNx__TQNuTQNrTQFlZQOgFQOdQFwZQHb@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr6__initZ@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr6fixRefFNaNbNiNfmZm@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr8popFrontMFNaNbNiZv@Base 12
+ _D3std5range__T5chainTSQv9algorithm9iteration__T6joinerTSQCdQBjQBc__T9MapResultSQDa8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQExQEdQDw__T12FilterResultSQFyQCyQCsQCmMxFNbNdZ9__lambda1TSQHeQHd__T4iotaTmTmZQkFmmZ6ResultZQDcZQGaZQGzFQGvZQyTSQJfQIlQIe__TQEiSQJvQGvQGpQGjMxFNbNdZ9__lambda3TSQLbQLa__TQDxTmTxmZQEgFmxmZQDyZQGwZQLwFQLtQDmZQEr9moveFrontMFNaNbNiZm@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFNaNbNiNfQFaQDoZSQGcQGb__TQFyTQFvTQEkZQGkFQGhQEvZQCi@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt4backMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt4saveMFNaNbNdNiNfZSQGoQGn__TQGkTQGhTQEwZQGwFQGtQFhZQCu@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt5frontMFNaNbNdNiNfZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6__ctorMFNaNbNcNiNfQFuQEiZSQGwQGv__TQGsTQGpTQFeZQHeFQHbQFpZQDc@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6__initZ@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6fixRefFNaNbNiNfaZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt6moveAtMFNaNbNiNfmZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt7opSliceMFNaNbNiNfmmZSQGrQGq__TQGnTQGkTQEzZQGzFQGwQFkZQCx@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt8moveBackMFNaNbNiNfZa@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T5chainTSQvQt__T4TakeTSQBjQBi__T6RepeatTaZQkZQBdTSQCk4conv__T7toCharsVii10TaVEQDm5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEvFQEsQDgZQt9moveFrontMFNaNbNiNfZa@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFNaNbNiNfQCkZSQDjQDi__TQDfTQDcZQDnFQDkZ__T6ResultZQi@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi10retroIndexMFNaNbNiNfmZm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi11__xopEqualsMxFKxSQEgQEf__TQEcTQDzZQEkFQEhZ__TQCfZQCjZb@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi13opIndexAssignMFNaNbNiNfQDrmZv@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi4backMFNaNbNcNdNiNfZQDm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi4backMFNaNbNdNiNfQDjZv@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi4saveMFNaNbNdNiNfZSQEgQEf__TQEcTQDzZQEkFQEhZ__TQCfZQCj@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi5frontMFNaNbNcNdNiNfZQDn@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi5frontMFNaNbNdNiNfQDkZv@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi6__initZ@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi6moveAtMFNaNbNiNfmZQDl@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi7opIndexMFNaNbNcNiNfmZQDo@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi7opSliceMFNaNbNiNfmmZSQEjQEi__TQEfTQEcZQEnFQEkZ__TQCiZQCm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi8moveBackMFNaNbNiNfZQDm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi9__mixin176lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi9__xtoHashFNbNeKxSQEfQEe__TQEbTQDyZQEjFQEgZ__TQCeZQCiZm@Base 12
+ _D3std5range__T5retroTASQw8datetime8timezone13PosixTimeZone10TransitionZQCfFQCcZ__T6ResultZQi9moveFrontMFNaNbNiNfZQDn@Base 12
+ _D3std5range__T6ChunksTAhZQl11DollarToken6__initZ@Base 12
+ _D3std5range__T6ChunksTAhZQl11DollarToken9momLengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T6ChunksTAhZQl11__xopEqualsMxFKxSQBtQBs__TQBpTQBlZQBxZb@Base 12
+ _D3std5range__T6ChunksTAhZQl4backMFNaNbNdNiNfZQx@Base 12
+ _D3std5range__T6ChunksTAhZQl4saveMFNaNbNdNiNfZSQBtQBs__TQBpTQBlZQBx@Base 12
+ _D3std5range__T6ChunksTAhZQl5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5range__T6ChunksTAhZQl5frontMFNaNbNdNiNfZQy@Base 12
+ _D3std5range__T6ChunksTAhZQl6__ctorMFNaNbNcNiNfQymZSQByQBx__TQBuTQBqZQCc@Base 12
+ _D3std5range__T6ChunksTAhZQl6__initZ@Base 12
+ _D3std5range__T6ChunksTAhZQl6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5range__T6ChunksTAhZQl7opIndexMFNaNbNiNfmZQz@Base 12
+ _D3std5range__T6ChunksTAhZQl7opSliceMFNaNbNiNfSQBtQBs__TQBpTQBlZQBx11DollarTokenQBiZSQDfQDe__TQDbTQCxZQDj@Base 12
+ _D3std5range__T6ChunksTAhZQl7opSliceMFNaNbNiNfSQBtQBs__TQBpTQBlZQBx11DollarTokenmZSQDdQDc__TQCzTQCvZQDh@Base 12
+ _D3std5range__T6ChunksTAhZQl7opSliceMFNaNbNiNfmSQBuQBt__TQBqTQBmZQBy11DollarTokenZSQDdQDc__TQCzTQCvZQDh@Base 12
+ _D3std5range__T6ChunksTAhZQl7opSliceMFNaNbNiNfmmZSQBwQBv__TQBsTQBoZQCa@Base 12
+ _D3std5range__T6ChunksTAhZQl7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T6ChunksTAhZQl8opDollarMFNaNbNiNfZSQBvQBu__TQBrTQBnZQBz11DollarToken@Base 12
+ _D3std5range__T6ChunksTAhZQl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T6ChunksTAhZQl9__xtoHashFNbNeKxSQBsQBr__TQBoTQBkZQBwZm@Base 12
+ _D3std5range__T6RepeatTaZQk11DollarToken6__initZ@Base 12
+ _D3std5range__T6RepeatTaZQk4backMNgFNaNbNdNiNfZNga@Base 12
+ _D3std5range__T6RepeatTaZQk4saveMNgFNaNbNdNiNfZNgSQBwQBv__TQBsTaZQBy@Base 12
+ _D3std5range__T6RepeatTaZQk5frontMNgFNaNbNdNiNfZNga@Base 12
+ _D3std5range__T6RepeatTaZQk6__initZ@Base 12
+ _D3std5range__T6RepeatTaZQk7opIndexMNgFNaNbNiNfmZNga@Base 12
+ _D3std5range__T6RepeatTaZQk7opSliceMFNaNbNiNfmmZSQBvQBu__T4TakeTSQClQCk__TQChTaZQCnZQBa@Base 12
+ _D3std5range__T6RepeatTaZQk7opSliceMNgFNaNbNiNfmSQBvQBu__TQBrTaZQBx11DollarTokenZNgSQDeQDd__TQDaTaZQDg@Base 12
+ _D3std5range__T6RepeatTaZQk7popBackMFNaNbNiNfZv@Base 12
+ _D3std5range__T6RepeatTaZQk8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5range__T6chunksTAhZQlFNaNbNiNfQomZSQBoQBn__T6ChunksTQBkZQm@Base 12
+ _D3std5range__T6repeatTaZQkFNaNbNiNfaZSQBlQBk__T6RepeatTaZQk@Base 12
+ _D3std5range__T6repeatTaZQkFNaNbNiNfamZSQBmQBl__T4TakeTSQCcQCb__T6RepeatTaZQkZQBd@Base 12
+ _D3std5range__T7padLeftTSQx4conv__T7toCharsVii10TaVEQBy5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultTaZQDjFNaNbNiNfQDmamZSQEpQEo__T5chainTSQFgQFf__T4TakeTSQFwQFv__T6RepeatTaZQkZQBdTQGaZQCcFQBzQGlZQDz@Base 12
+ _D3std5regex11__moduleRefZ@Base 12
+ _D3std5regex12__ModuleInfoZ@Base 12
+ _D3std5regex8internal12backtracking11__moduleRefZ@Base 12
+ _D3std5regex8internal12backtracking12__ModuleInfoZ@Base 12
+ _D3std5regex8internal12backtracking9CtContext10ctAtomCodeMFAxSQCiQChQCe2ir8BytecodeiZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext10ctGenBlockMFAxSQCiQChQCe2ir8BytecodeiZSQDgQDfQDcQCwQCl7CtState@Base 12
+ _D3std5regex8internal12backtracking9CtContext10ctGenGroupMFKAxSQCjQCiQCf2ir8BytecodeiZSQDhQDgQDdQCxQCm7CtState@Base 12
+ _D3std5regex8internal12backtracking9CtContext10ctGenRegExMFAxSQCiQChQCe2ir8BytecodeZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext10lookaroundMFkkZSQCjQCiQCfQBzQBo@Base 12
+ _D3std5regex8internal12backtracking9CtContext11__xopEqualsMxFKxSQCkQCjQCgQCaQBpZb@Base 12
+ _D3std5regex8internal12backtracking9CtContext11ctQuickTestMFAxSQCjQCiQCf2ir8BytecodeiZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext11restoreCodeMFZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext14ctGenFixupCodeMFAxSQCmQClQCi2ir8BytecodeiiZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext14ctGenFixupCodeMFKAxSQCnQCmQCj2ir8BytecodeiiZAya@Base 12
+ _D3std5regex8internal12backtracking9CtContext16ctGenAlternationMFAxSQCoQCnQCk2ir8BytecodeiZSQDmQDlQDiQDcQCr7CtState@Base 12
+ _D3std5regex8internal12backtracking9CtContext6__initZ@Base 12
+ _D3std5regex8internal12backtracking9CtContext7CtState11__xopEqualsMxFKxSQCsQCrQCoQCiQBxQBqZb@Base 12
+ _D3std5regex8internal12backtracking9CtContext7CtState6__initZ@Base 12
+ _D3std5regex8internal12backtracking9CtContext7CtState9__xtoHashFNbNeKxSQCrQCqQCnQChQBwQBpZm@Base 12
+ _D3std5regex8internal12backtracking9CtContext8saveCodeMFkAyaZQe@Base 12
+ _D3std5regex8internal12backtracking9CtContext9__xtoHashFNbNeKxSQCjQCiQCfQBzQBoZm@Base 12
+ _D3std5regex8internal12backtracking9CtContext9ctGenAtomMFKAxSQChQCgQCd2ir8BytecodeiZSQDfQDeQDbQCvQCk7CtState@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl10bwdMatcherMFNaNeKxSQFtQFsQFpQDk__T5RegexTaZQjAvZCQGxQGwQGtQGn__TQGcTaTQEdZQGm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl10fwdMatcherMFNaNbNeKxSQFvQFuQFrQDm__T5RegexTaZQjAvZCQGzQGyQGvQGp__TQGeTaTQFmZQGo@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl10initializeMFNaNbNiNeKxSQFxQFwQFtQDo__T5RegexTaZQjQEpAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl10stackAvailMFNaNbNdNiNeZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl13initialMemoryFNaNbNiNeKxSQFzQFyQFvQDq__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl13matchFinalizeMFNaNeZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl18initExternalMemoryMFNaNbNiNeAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl4nextMFNaNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5State6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5Trace4markMFNaNbNiNemZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5Trace6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5atEndMFNaNdNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5dupToMFNeCQFjQFiQFfQDa__T7MatcherTaZQlAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5matchMFNaNeASQFmQFlQFiQDd__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl5rearmMFNeIAaZCQFnQFmQFjQDe__T7MatcherTaZQl@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6__ctorMFNaNbNiNeKxSQFsQFrQFoQDj__T5RegexTaZQjQEkAvwmZCQHbQHaQGxQGr__TQGgTaTQFoZQGq@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6__ctorMFNaNeKxSQFoQFnQFkQDf__T5RegexTaZQjPFNaNeCQGvQGuQGrQGl__TQGaTaTQFiZQGkZbQFrAvZQBl@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6__ctorMFNaNeKxSQFoQFnQFkQDf__T5RegexTaZQjQEgAvZCQGvQGuQGrQGl__TQGaTaTQFiZQGk@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6__vtblZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl6searchMFNaNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl7__ClassZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl7atStartMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl7patternMFNcNdNeZxSQFrQFqQFnQDi__T5RegexTaZQj@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl8newStackMFNaNbNiNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl8popStateMFNaNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl8refCountMFNcNdNeZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl9matchImplMFNaNeZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl9prevStackMFNaNbNiNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl9pushStateMFNaNbNiNekkZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T14BackLooperImplTSQDqQDpQDmQBh__T5InputTaZQjZQBtZQDl9stackSizeFNaNbNiNeKxSQFuQFtQFqQDl__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa10bwdMatcherMFNaNeKxSQEiQEhQEeQBz__T5RegexTaZQjAvZCQFmQFlQFiQFc__TQErTaTSQGiQGhQGeQDz__T14BackLooperImplTQFgZQvZQGl@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa10fwdMatcherMFNaNbNeKxSQEkQEjQEgQCb__T5RegexTaZQjAvZCQFoQFnQFkQFe__TQEtTaTQEbZQFd@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa10initializeMFNaNbNiNeKxSQEmQElQEiQCd__T5RegexTaZQjQDeAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa10stackAvailMFNaNbNdNiNeZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa13initialMemoryFNaNbNiNeKxSQEoQEnQEkQCf__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa13matchFinalizeMFNaNeZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa18initExternalMemoryMFNaNbNiNeAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa4nextMFNaNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5State6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5Trace4markMFNaNbNiNemZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5Trace6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5atEndMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5dupToMFNeCQDyQDxQDuQBp__T7MatcherTaZQlAvZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5matchMFNaNeASQEbQEaQDxQBs__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa5rearmMFNeIAaZCQEcQEbQDyQBt__T7MatcherTaZQl@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6__ctorMFNaNbNiNeKxSQEhQEgQEdQBy__T5RegexTaZQjQCzAvwmZCQFqQFpQFmQFg__TQEvTaTQEdZQFf@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6__ctorMFNaNeKxSQEdQEcQDzQBu__T5RegexTaZQjPFNaNeCQFkQFjQFgQFa__TQEpTaTQDxZQEzZbQEgAvZQBl@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6__ctorMFNaNeKxSQEdQEcQDzQBu__T5RegexTaZQjQCvAvZCQFkQFjQFgQFa__TQEpTaTQDxZQEz@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6__initZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6__vtblZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa6searchMFNaNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa7__ClassZ@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa7atStartMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa7patternMFNcNdNeZxSQEgQEfQEcQBx__T5RegexTaZQj@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa8newStackMFNaNbNiNeZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa8popStateMFNaNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa8refCountMFNcNdNeZm@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa9matchImplMFNaNeZi@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa9prevStackMFNaNbNiNeZb@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa9pushStateMFNaNbNiNekkZv@Base 12
+ _D3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCa9stackSizeFNaNbNiNeKxSQEjQEiQEfQCa__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTQhTQkTQnZQxFNaNbNeQzQBbQBeQBhQBkQBnZQBr@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTQhTQkZQuFNaNbNeQwQyQBaQBdQBgZQBk@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTQhTxkTxkTiTQsTQvTQyZQBiFNaNbNeQBlQBoQBrQBuxkxkiQCcQCfQCiZQCm@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTQhZQrFNaNbNeQtQvQxQzZQBc@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTiTQjTQmTQpTxkTxkTiTQBaTQBeTQBiZQBtFNaNbNeQBwQBzQCciQCgQCjQCmxkxkiQCuQCxQDaZQDe@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTiTQjTiTQoTiZQBaFNaNbNeQBdQBgQBjiQBniQBriZQBw@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTiTiTQlZQvFNaNbNeQxQzQBbiiQBgZQBk@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTiZQqFNaNbNeQsQuQwiZQBa@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeTxkTxkTiTQpTQsTQvZQBfFNaNbNeQBiQBlQBoxkxkiQBwQBzQCcZQCg@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTQeZQoFNaNbNeQqQsQuZQx@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiTQgTQjTQmTxkTxkTiTQxTQBaTQBeZQBpFNaNbNeQBsQBviQBzQCcQCfxkxkiQCnQCqQCtZQCx@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiTQgTiTQlTiZQxFNaNbNeQzQBbiQBfiQBjiZQBo@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiTQgTiZQsFNaNbNeQuQwiQziZQBd@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiTiTQiTiZQuFNaNbNeQwQyiiQBciZQBh@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiTiTQiZQsFNaNbNeQuQwiiQBaZQBe@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTiZQnFNaNbNeQpQriZQv@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTxkTxkTiTQmTQpTQsZQBcFNaNbNeQBfQBixkxkiQBqQBtQBwZQCa@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaTykTiZQqFNaNbNeQsQuykiZQBa@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTAyaZQlFNaNbNeQnQpZQs@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTQeTQhTxkTxkTiTQsTQvTQyZQBkFNaNbNeQBliQBpQBsQBvxkxkiQCdQCgQCjZQCn@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTQeTQhZQtFNaNbNeQtiQwQyQBaZQBe@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTQeTiTQjTQmTQpTxkTxkTiTQBaTQBeTQBiZQBvFNaNbNeQBwiQCaQCdiQChQCkQCnxkxkiQCvQCyQDbZQDf@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTQeTiZQsFNaNbNeQsiQvQxiZQBb@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTiTQgTiZQuFNaNbNeQuiQxiQBaiZQBf@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaTiZQpFNaNbNeQpiQsiZQw@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTAyaZQnFNaNbNeQniQqZQt@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTiTAyaTiZQrFNaNbNeQpiiQtiZQx@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTiTAyaZQpFNaNbNeQniiQrZQu@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTiZQlFNaNbNeAyaiiZQg@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiTykTiZQoFNaNbNeAyaiykiZQi@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTiZQjFNaNbNeAyaiZQf@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTkTAyaTQeTQhZQtFNaNbNeQtkQwQyQBaZQBe@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTkTAyaTQeZQqFNaNbNeQqkQtQvZQy@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTkTAyaZQnFNaNbNeQnkQqZQt@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTkTkZQlFNaNbNeAyakkZQg@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTkZQjFNaNbNeAyakZQf@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTxkTAyaTQeTiTQjTiTQoTiZQBdFNaNbNeQBdxkQBiQBliQBpiQBtiZQBy@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTxkTAyaTQeTiTiTQlZQyFNaNbNeQxxkQBbQBeiiQBjZQBn@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTxkTiTAyaTQeTQhZQwFNaNbNeQtxkiQyQBaQBdZQBh@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTxkTxkTiTAyaTQeTQhZQzFNaNbNeQtxkxkiQBaQBdQBgZQBk@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTxkTykTiTykTiZQuFNaNbNeAyaxkykiykiZQm@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTykTAyaTykTiZQtFNaNbNeQsykQwykiZQBc@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTykTiTiTAyaTiZQuFNaNbNeQpykiiQviZQz@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTykTiTykTiZQrFNaNbNeAyaykiykiZQk@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTykTiZQmFNaNbNeAyaykiZQh@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubTykZQkFNaNbNeAyaykZQg@Base 12
+ _D3std5regex8internal12backtracking__T5ctSubZQhFNaNbNiNeAyaZQe@Base 12
+ _D3std5regex8internal2ir10NamedGroup11__xopEqualsMxFKxSQCbQCaQBxQBrQBrZb@Base 12
+ _D3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D3std5regex8internal2ir10NamedGroup9__xtoHashFNbNeKxSQCaQBzQBwQBqQBqZm@Base 12
+ _D3std5regex8internal2ir10getMatcherFNeSQBm3uni__T13InversionListTSQCnQBb8GcPolicyZQBhZSQDiQDhQDeQCy11CharMatcher@Base 12
+ _D3std5regex8internal2ir10lengthOfIRFNaNbNiNfEQBsQBrQBoQBi2IRZi@Base 12
+ _D3std5regex8internal2ir11CharMatcher11__xopEqualsMxFKxSQCcQCbQByQBsQBsZb@Base 12
+ _D3std5regex8internal2ir11CharMatcher6__ctorMFNcSQBv3uni__T13InversionListTSQCwQBb8GcPolicyZQBhZSQDrQDqQDnQDhQDh@Base 12
+ _D3std5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D3std5regex8internal2ir11CharMatcher9__xtoHashFNbNeKxSQCbQCaQBxQBrQBrZm@Base 12
+ _D3std5regex8internal2ir11CharMatcher__T7opIndexZQjMxFNaNbNiNfwZb@Base 12
+ _D3std5regex8internal2ir11RegexOption6__initZ@Base 12
+ _D3std5regex8internal2ir11__moduleRefZ@Base 12
+ _D3std5regex8internal2ir11disassembleFNeIASQBpQBoQBlQBf8BytecodekIASQCoQCnQCkQCe10NamedGroupZAya@Base 12
+ _D3std5regex8internal2ir12__ModuleInfoZ@Base 12
+ _D3std5regex8internal2ir12matcherCacheHSQBm3uni__T13InversionListTSQCnQBb8GcPolicyZQBhSQDhQDgQDdQCx11CharMatcher@Base 12
+ _D3std5regex8internal2ir14RegexException6__initZ@Base 12
+ _D3std5regex8internal2ir14RegexException6__vtblZ@Base 12
+ _D3std5regex8internal2ir14RegexException7__ClassZ@Base 12
+ _D3std5regex8internal2ir14RegexException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDmQDlQDiQDcQDc@Base 12
+ _D3std5regex8internal2ir14RegexException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDmQDlQDiQDcQDc@Base 12
+ _D3std5regex8internal2ir16lengthOfPairedIRFNaNbNiNfEQByQBxQBuQBo2IRZi@Base 12
+ _D3std5regex8internal2ir17immediateParamsIRFNaNbNiNfEQBzQByQBvQBp2IRZi@Base 12
+ _D3std5regex8internal2ir2IR6__initZ@Base 12
+ _D3std5regex8internal2ir7isEndIRFNaNbNiNfEQBoQBnQBkQBe2IRZb@Base 12
+ _D3std5regex8internal2ir8BitTable6__ctorMFNcSQBr3uni__T13InversionListTSQCsQBb8GcPolicyZQBhZSQDnQDmQDjQDdQDd@Base 12
+ _D3std5regex8internal2ir8BitTable6__initZ@Base 12
+ _D3std5regex8internal2ir8BitTable__T3addZQfMFNaNbNiNfwZv@Base 12
+ _D3std5regex8internal2ir8BitTable__T5indexZQhFNaNbNiNfwZk@Base 12
+ _D3std5regex8internal2ir8BitTable__T7opIndexZQjMxFNaNbNiNfwZb@Base 12
+ _D3std5regex8internal2ir8Bytecode11indexOfPairMxFNaNfkZk@Base 12
+ _D3std5regex8internal2ir8Bytecode11setLocalRefMFNaNfZv@Base 12
+ _D3std5regex8internal2ir8Bytecode12pairedLengthMxFNaNdNfZk@Base 12
+ _D3std5regex8internal2ir8Bytecode13backreferenceMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode14setBackrefenceMFNaNfZv@Base 12
+ _D3std5regex8internal2ir8Bytecode4argsMxFNaNdNfZi@Base 12
+ _D3std5regex8internal2ir8Bytecode5isEndMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode6__ctorMFNaNcNfEQBvQBuQBrQBl2IRkZSQCnQCmQCjQCdQCd@Base 12
+ _D3std5regex8internal2ir8Bytecode6__ctorMFNaNcNfEQBvQBuQBrQBl2IRkkZSQCoQCnQCkQCeQCe@Base 12
+ _D3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D3std5regex8internal2ir8Bytecode6isAtomMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode6lengthMxFNaNdNfZk@Base 12
+ _D3std5regex8internal2ir8Bytecode6pairedMxFNaNdNfZSQBxQBwQBtQBnQBn@Base 12
+ _D3std5regex8internal2ir8Bytecode7fromRawFNaNfkZSQBvQBuQBrQBlQBl@Base 12
+ _D3std5regex8internal2ir8Bytecode7hotspotMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode7isStartMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode8localRefMxFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir8Bytecode__T4codeZQgMxFNaNbNdNiNfZEQCfQCeQCbQBv2IR@Base 12
+ _D3std5regex8internal2ir8Bytecode__T4dataZQgMFNaNbNdNiNfkZv@Base 12
+ _D3std5regex8internal2ir8Bytecode__T4dataZQgMxFNaNbNdNiNfZk@Base 12
+ _D3std5regex8internal2ir8Bytecode__T8mnemonicZQkMxFNaNdNeZAya@Base 12
+ _D3std5regex8internal2ir8Bytecode__T8sequenceZQkMxFNaNbNdNiNfZk@Base 12
+ _D3std5regex8internal2ir8hasMergeFNaNbNiNfEQBpQBoQBlQBf2IRZb@Base 12
+ _D3std5regex8internal2ir8isAtomIRFNaNbNiNfEQBpQBoQBlQBf2IRZb@Base 12
+ _D3std5regex8internal2ir8pairedIRFNaNbNiNfEQBpQBoQBlQBf2IRZQr@Base 12
+ _D3std5regex8internal2ir9RegexInfo6__initZ@Base 12
+ _D3std5regex8internal2ir9isStartIRFNaNbNiNfEQBqQBpQBmQBg2IRZb@Base 12
+ _D3std5regex8internal2ir__T11mallocArrayTmZQqFNaNbNimZAm@Base 12
+ _D3std5regex8internal2ir__T11startOfLineZQoFNaNbNiNfwbZb@Base 12
+ _D3std5regex8internal2ir__T11wordMatcherZQoFNaNbNcNdNiNfZySQCfQCeQCbQBv11CharMatcher@Base 12
+ _D3std5regex8internal2ir__T11wordMatcherZQoFNcNdZ7matcherySQCfQCeQCbQBv11CharMatcher@Base 12
+ _D3std5regex8internal2ir__T12arrayInChunkTPFNaNeCQBvQBuQBr8thompson__T15ThompsonMatcherTaTSQDlQDkQDhQDb__T14BackLooperImplTSQEsQErQEoQEi__T5InputTaZQjZQBtZQDhPSQGcQGbQFyQEh__TQEbTaTQDnZQEl5StateZbZQGoFNaNbNimKAvZAQGp@Base 12
+ _D3std5regex8internal2ir__T12arrayInChunkTPFNaNeCQBvQBuQBr8thompson__T15ThompsonMatcherTaTSQDlQDkQDhQDb__T5InputTaZQjZQBwPSQErQEqQEnQCw__TQCqTaTQCcZQDa5StateZbZQFdFNaNbNimKAvZAQFe@Base 12
+ _D3std5regex8internal2ir__T12arrayInChunkTSQBpQBoQBl12backtracking__T19BacktrackingMatcherTaTSQDoQDnQDkQDe__T14BackLooperImplTSQEvQEuQErQEl__T5InputTaZQjZQBtZQDl5TraceZQFlFNaNbNimKAvZAQFm@Base 12
+ _D3std5regex8internal2ir__T12arrayInChunkTSQBpQBoQBl12backtracking__T19BacktrackingMatcherTaTSQDoQDnQDkQDe__T5InputTaZQjZQCa5TraceZQEaFNaNbNimKAvZAQEb@Base 12
+ _D3std5regex8internal2ir__T12arrayInChunkTmZQrFNaNbNimKAvZAm@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt11__xopEqualsMxFKxSQDoQDnQDkQDe__TQDeTQCrZQDmZb@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt5atEndMFNaNdNfZb@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt5resetMFNaNbNiNfmZv@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt6__ctorMFNaNbNcNiNfAxaZSQDtQDsQDpQDj__TQDjTQCwZQDr@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt6__ctorMFNaNbNcNiNfQBymZSQDuQDtQDqQDk__TQDkTQCxZQDs@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt6__initZ@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt7opSliceMFNaNbNiNfmmZAxa@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt8loopBackMFNaNbNiNfmZQCa@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt8nextCharMFNaNeKwKmZb@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt9__xtoHashFNbNeKxSQDnQDmQDjQDd__TQDdTQCqZQDlZm@Base 12
+ _D3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt9lastIndexMFNaNbNdNiNfZm@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm3dupMxFNeCQDyQDxQDuQDo__T7MatcherTaZQlIAaZCQFfQFeQFbQDp__TQDhTaTSQGbQGaQFxQFr__T5InputTaZQjZQEp@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6__initZ@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6__vtblZ@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6createMxFNeKxSQEdQEcQDzQDt__T5RegexTaZQjIAaZCQFiQFhQFeQDs__TQDkTaTSQGeQGdQGaQFu__T5InputTaZQjZQEs@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6decRefMxFNeCQEbQEaQDxQDr__T7MatcherTaZQlZm@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6incRefMxFNfCQEbQEaQDxQDr__T7MatcherTaZQlZm@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm7__ClassZ@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd3dupMxFNeCQDpQDoQDlQDf__T7MatcherTaZQlIAaZCQEwQEvQEsQDg__TQDdTaTSQFsQFrQFoQFi__T5InputTaZQjZQEl@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6__initZ@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6__vtblZ@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6createMxFNeKxSQDuQDtQDqQDk__T5RegexTaZQjIAaZCQEzQEyQEvQDj__TQDgTaTSQFvQFuQFrQFl__T5InputTaZQjZQEo@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6decRefMxFNeCQDsQDrQDoQDi__T7MatcherTaZQlZm@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6incRefMxFNfCQDsQDrQDoQDi__T7MatcherTaZQlZm@Base 12
+ _D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd7__ClassZ@Base 12
+ _D3std5regex8internal2ir__T14MatcherFactoryTaZQt11__InterfaceZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6__initZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6__vtblZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm7__ClassZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm9constructMxFKxSQEeQEdQEaQDu__T5RegexTaZQjIAaAvZCQFlQFkQFhQDv__TQDnTaTSQGhQGgQGdQFx__T5InputTaZQjZQEv@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6__initZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6__vtblZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd7__ClassZ@Base 12
+ _D3std5regex8internal2ir__T14RuntimeFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd9constructMxFKxSQDvQDuQDrQDl__T5RegexTaZQjIAaAvZCQFcQFbQEyQDm__TQDjTaTSQFyQFxQFuQFo__T5InputTaZQjZQEr@Base 12
+ _D3std5regex8internal2ir__T14defaultFactoryTaZQtFNaNbNdNfKxSQCgQCfQCcQBw__T5RegexTaZQjZCQDiQDhQDeQCy__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T14defaultFactoryTaZQtFNaNdNfKxSQCeQCdQCaQBu__T5RegexTaZQjZ4implFNbNfKxQBoZCQDwQDvQDsQDm__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T14defaultFactoryTaZQtFNaNdNfKxSQCeQCdQCaQBu__T5RegexTaZQjZ8pureImplFNaNbNeKxQBuZCQEcQEbQDyQDs__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy10__postblitMFNaNbNiNfZv@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy10abandonRefMFNaNbNiNeZv@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy13internalSliceMNgFNaNbNdNiNeZANgSQEiQEhQEeQDy__TQCqTmZQCw@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy5isBigMxFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6__ctorMFNaNbNcNimZSQDuQDtQDqQDk__TQDkTQCwVki3ZQDw@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6__dtorMFNaNbNiNfZv@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6__initZ@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6mutateMFNaMDFNaAQCaZvZv@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6toHashMxFNaNbNiNfZm@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy7Payload3ptrMNgFNaNbNiZPNgSQEbQEaQDxQDr__TQCjTmZQCp@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy7Payload6__initZ@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy7opIndexMNgFNaNbNcNiNfmZNgSQEbQEaQDxQDr__TQCjTmZQCp@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy8opAssignMFNaNbNcNiNeSQDwQDvQDsQDm__TQDmTQCyVki3ZQDyZQBg@Base 12
+ _D3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy8opEqualsMFNaNbNiNfSQDuQDtQDqQDk__TQDkTQCwVki3ZQDwZb@Base 12
+ _D3std5regex8internal2ir__T18assumePureFunctionTPFNbNfKxSQCdQCcQBzQBt__T5RegexTaZQjZCQDfQDeQDbQCv__T14MatcherFactoryTaZQtZQDrFNaNbNiQDgZPFNaNbNfKxQDmZQCo@Base 12
+ _D3std5regex8internal2ir__T18defaultFactoryImplTaZQxFKxSQCcQCbQByQBs__T5RegexTaZQjZ15thompsonFactoryCQDvQDuQDrQDl__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T18defaultFactoryImplTaZQxFKxSQCcQCbQByQBs__T5RegexTaZQjZ19backtrackingFactoryCQDzQDyQDvQDp__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T18defaultFactoryImplTaZQxFNbNfKxSQCgQCfQCcQBw__T5RegexTaZQjZCQDiQDhQDeQCy__T14MatcherFactoryTaZQt@Base 12
+ _D3std5regex8internal2ir__T5GroupTmZQj6__initZ@Base 12
+ _D3std5regex8internal2ir__T5GroupTmZQj__T6opCastHTbZQlMxFNaNbNiNfZb@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj11__xopEqualsMxFKxSQCdQCcQBzQBt__TQBtTaZQBzZb@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj5atEndMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj5resetMFNaNbNiNfmZv@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj6__ctorMFNaNbNcNiNfAxamZSQCjQCiQCfQBz__TQBzTaZQCf@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj6__initZ@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj7opSliceMFNaNbNiNfmmZAxa@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj8loopBackMFNaNbNiNfmZSQCgQCfQCcQBw__T14BackLooperImplTSQDnQDmQDjQDd__TQDdTaZQDjZQBr@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj9__xtoHashFNbNeKxSQCcQCbQByQBs__TQBsTaZQByZm@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj9lastIndexMFNaNbNdNiNfZm@Base 12
+ _D3std5regex8internal2ir__T5InputTaZQj__T6searchTSQBwQBvQBs9kickstart__T7ShiftOrTaZQlZQBtMFNaNfKxSQDsQDrQDoQBw__TQBpTaZQBvKwKmZb@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj10withNGroupMxFNaNbNiNekZxSQClQCkQChQCb__TQCbTaZQCh@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj11__xopEqualsMxFKxSQCdQCcQBzQBt__TQBtTaZQBzZb@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj11withFactoryMxFNaNbNiNeCQCjQCiQCfQBz__T14MatcherFactoryTaZQtZxSQDwQDvQDsQDm__TQDmTaZQDs@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNaNbNdNiNfZSQCnQCmQCjQCd__TQCdTaZQCjQCbMFNdNfZ15NamedGroupRange@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange11__xopEqualsMxFKxSQDqQDpQDmQDg__TQDgTaZQDmQDeMFNdNfZQCsZb@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange4backMFNaNbNdNiNfZAya@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange4saveMFNaNbNdNiNfZSQDqQDpQDmQDg__TQDgTaZQDmQDeMFNdNfZQCs@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange5frontMFNaNbNdNiNfZAya@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange6__ctorMFNaNbNcNiNfAxSQDtQDsQDpQDj10NamedGroupmmZSQEvQEuQErQEl__TQElTaZQErQEjMFNdNfZQDx@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange6__initZ@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange6lengthMFNaNbNdNiNfZm@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange7opSliceMFNaNbNiNfZSQDrQDqQDnQDh__TQDhTaZQDnQDfMFNdNfZQCt@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange7opSliceMFNaNbNiNfmmZSQDtQDsQDpQDj__TQDjTaZQDpQDhMFNdNfZQCv@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange7popBackMFNaNbNiNfZv@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange9__xtoHashFNbNeKxSQDpQDoQDlQDf__TQDfTaZQDlQDdMFNdNfZQCrZm@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj14checkIfOneShotMFNaNfZv@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj6__initZ@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj8withCodeMxFNaNbNeAxSQCfQCeQCbQBv8BytecodeZxSQDdQDcQCzQCt__TQCtTaZQCz@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj9__xtoHashFNbNeKxSQCcQCbQByQBs__TQBsTaZQByZm@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj9isBackrefMFNaNbNiNfkZk@Base 12
+ _D3std5regex8internal2ir__T5RegexTaZQj9withFlagsMxFNaNbNiNekZxSQCjQCiQCfQBz__TQBzTaZQCf@Base 12
+ _D3std5regex8internal2ir__T7MatcherTaZQl6__initZ@Base 12
+ _D3std5regex8internal2ir__T7MatcherTaZQl6__vtblZ@Base 12
+ _D3std5regex8internal2ir__T7MatcherTaZQl7__ClassZ@Base 12
+ _D3std5regex8internal2ir__T9endOfLineZQlFNaNbNiNfwbZb@Base 12
+ _D3std5regex8internal5tests11__moduleRefZ@Base 12
+ _D3std5regex8internal5tests12__ModuleInfoZ@Base 12
+ _D3std5regex8internal6parser11__moduleRefZ@Base 12
+ _D3std5regex8internal6parser12__ModuleInfoZ@Base 12
+ _D3std5regex8internal6parser7CodeGen10endPatternMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen11__xopEqualsMxFKxSQCbQCaQBxQBrQBnZb@Base 12
+ _D3std5regex8internal6parser7CodeGen11charsetToIrMFNeSQCa3uni__T13InversionListTSQDbQBb8GcPolicyZQBhZv@Base 12
+ _D3std5regex8internal6parser7CodeGen11fixupLengthMFNdZm@Base 12
+ _D3std5regex8internal6parser7CodeGen11isOpenGroupMFkZb@Base 12
+ _D3std5regex8internal6parser7CodeGen11markBackrefMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13fixLookaroundMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13fixRepetitionMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13fixRepetitionMFkkkbZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13genLogicGroupMFZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13genLookaroundMFEQCaQBzQBw2ir2IRZv@Base 12
+ _D3std5regex8internal6parser7CodeGen13genNamedGroupMFAyaZv@Base 12
+ _D3std5regex8internal6parser7CodeGen14fixAlternationMFZv@Base 12
+ _D3std5regex8internal6parser7CodeGen17finishAlternationMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen3putMFSQBpQBoQBl2ir8BytecodeZv@Base 12
+ _D3std5regex8internal6parser7CodeGen5startMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen6__initZ@Base 12
+ _D3std5regex8internal6parser7CodeGen6lengthMFNdZk@Base 12
+ _D3std5regex8internal6parser7CodeGen6putRawMFkZv@Base 12
+ _D3std5regex8internal6parser7CodeGen7onCloseMFZSQBu8typecons__T5TupleTbTkZQl@Base 12
+ _D3std5regex8internal6parser7CodeGen8genGroupMFZv@Base 12
+ _D3std5regex8internal6parser7CodeGen8popFixupMFZk@Base 12
+ _D3std5regex8internal6parser7CodeGen8topFixupMFNdZk@Base 12
+ _D3std5regex8internal6parser7CodeGen9__xtoHashFNbNeKxSQCaQBzQBwQBqQBmZm@Base 12
+ _D3std5regex8internal6parser7CodeGen9pushFixupMFkZv@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp11__xopEqualsMxFKxSQEoQEnQEkQEe__TQEaTaZQEgFNeKQDrZ__TQCtTkZQCzZb@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp3popMFNaNbNiNfZk@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp3topMFNaNbNcNdNiNfZk@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp4pushMFNaNbNiNfkZv@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp6__initZ@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZ__T10FixedStackTkZQp9__xtoHashFNbNeKxSQEnQEmQEjQEd__TQDzTaZQEfFNeKQDqZ__TQCsTkZQCyZm@Base 12
+ _D3std5regex8internal6parser__T11postprocessTaZQqFNeKSQCaQBzQBw2ir__T5RegexTaZQjZv@Base 12
+ _D3std5regex8internal6parser__T13fixupBytecodeZQqFNfASQCaQBzQBw2ir8BytecodeZv@Base 12
+ _D3std5regex8internal6parser__T15reverseBytecodeZQsFNeASQCcQCbQBy2ir8BytecodeZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi10parseRegexMFNeZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi11__xopEqualsMxFKxSQDhQDgQDdQCx__TQCtTQCpTQCpZQDfZb@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi11parseEscapeMFNeZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi12parseCharsetMFNfZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi12parseDecimalMFNaNfZk@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi15parseQuantifierMFNekZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi4saveMFNaNbNiNfZSQDfQDeQDbQCv__TQCrTQCnTQCnZQDd@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi5errorMFNaNeQBpZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi6__initZ@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi7programMFNdNfZSQDeQDdQDa2ir__T5RegexTaZQj@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi8popFrontMFNaNfZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi9__xtoHashFNbNeKxSQDgQDfQDcQCw__TQCsTQCoTQCoZQDeZm@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi9_popFrontMFNaNfZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi9parseAtomMFZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi9skipSpaceMFNaNfZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi__T10parseFlagsTAxaZQrMFNaNeQmZv@Base 12
+ _D3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi__T6__ctorTAxaZQmMFNcNeQCaQpZSQDsQDrQDoQDi__TQDeTQDaTQDaZQDq@Base 12
+ _D3std5regex8internal6parser__T8optimizeTaZQmFKSQBuQBtQBq2ir__T5RegexTaZQjZv@Base 12
+ _D3std5regex8internal6parser__T9makeRegexTAyaTSQBtQBsQBpQBj7CodeGenZQBlFNfSQCvQCuQCrQCl__T6ParserTQCeTQCeZQqZSQEeQEdQEa2ir__T5RegexTaZQj@Base 12
+ _D3std5regex8internal6tests211__moduleRefZ@Base 12
+ _D3std5regex8internal6tests212__ModuleInfoZ@Base 12
+ _D3std5regex8internal8thompson11__moduleRefZ@Base 12
+ _D3std5regex8internal8thompson12__ModuleInfoZ@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp10insertBackMFNaNbNiNfPSQCuQCtQCqQCk__T6ThreadTmZQkZv@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange5frontMFNaNbNdNiNfZPxSQDfQDeQDbQCv__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange6__ctorMFNaNbNcNiNfSQDdQDcQCzQCt__TQCnTmZQCtZSQEdQEcQDzQDt__TQDnTmZQDtQDf@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange6__initZ@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange8popFrontMFNaNbNiNfZv@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp11insertFrontMFNaNbNiNfPSQCvQCuQCrQCl__T6ThreadTmZQkZv@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp5fetchMFNaNbNiNfZPSQCpQCoQClQCf__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp6__initZ@Base 12
+ _D3std5regex8internal8thompson__T10ThreadListTmZQp7opSliceMFNaNbNiNfZSQCqQCpQCmQCg__TQCaTmZQCg11ThreadRange@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opHVEQHsQHrQHoQEo2IRi172ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi128ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi129ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi130ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi132ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi133ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi134ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi136ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi137ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi138ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi140ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi141ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi142ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi144ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi145ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi146ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi148ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi149ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi150ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi152ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi153ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi154ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi156ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi157ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi158ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi160ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi161ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi162ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi164ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi165ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi166ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi168ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi176ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi180ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi184ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi188ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi192ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi0Z__T2opVEQHrQHqQHnQEn2IRi196ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi128ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi129ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi130ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi132ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi133ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi136ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi137ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi140ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi141ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi144ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi145ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi148ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi149ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi152ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi156ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi160ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi164ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi168ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi172ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi176ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi180ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi184ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi188ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi192ZQBaFNaNbNiNeQHfPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opHVEQHsQHrQHoQEo2IRi196ZQBaFNaNeQHbPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi134ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi138ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi142ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi146ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi150ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi153ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi154ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi157ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi158ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi161ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi162ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi165ZQzFNaNeQGzPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T14BackLooperImplTSQElQEkQEhQBh__T5InputTaZQjZQBtZQDhTSQFvQFuQFrQFl__TQEbTaTQDnZQEl5StateHVbi1Z__T2opVEQHrQHqQHnQEn2IRi166ZQzFNaNbNiNeQHdPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opHVEQGhQGgQGdQDd2IRi172ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi128ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi129ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi130ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi132ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi133ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi134ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi136ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi137ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi138ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi140ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi141ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi142ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi144ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi145ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi146ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi148ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi149ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi150ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi152ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi153ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi154ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi156ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi157ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi158ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi160ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi161ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi162ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi164ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi165ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi166ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi168ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi176ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi180ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi184ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi188ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi192ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi0Z__T2opVEQGgQGfQGcQDc2IRi196ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi128ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi129ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi130ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi132ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi133ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi136ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi137ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi140ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi141ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi144ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi145ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi148ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi149ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi152ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi156ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi160ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi164ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi168ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi172ZQBaFNaNeQFqPQDeZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi176ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi180ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi184ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi188ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi192ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opHVEQGhQGgQGdQDd2IRi196ZQBaFNaNbNiNeQFuPQDiZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi134ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi138ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi142ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi146ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi150ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi153ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi154ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi157ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi158ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi161ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi162ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi165ZQzFNaNeQFoPQDcZb@Base 12
+ _D3std5regex8internal8thompson__T11ThompsonOpsTCQBuQBtQBqQBk__T15ThompsonMatcherTaTSQDeQDdQDa2ir__T5InputTaZQjZQBwTSQEkQEjQEgQEa__TQCqTaTQCcZQDa5StateHVbi1Z__T2opVEQGgQGfQGcQDc2IRi166ZQzFNaNbNiNeQFsPQDgZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh11createStartMFNaNbNiNemkZPSQFrQFqQFnQFh__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh12matchOneShotMFNaNeASQFlQFkQFhQDl__T5GroupTmZQjkZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh13getThreadSizeFNaNbNiNeKxSQFqQFpQFmQDq__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh13initialMemoryFNaNbNiNeKxSQFqQFpQFmQDq__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh15prepareFreeListMFNaNbNiNemKAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh18initExternalMemoryMFNaNeAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh4forkMFNaNbNiNePSQFgQFfQFcQEw__T6ThreadTmZQkkkZQBg@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh4nextMFNaNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5State11__xopEqualsMxFKxSQFoQFnQFkQFe__TQEyTaTQEkZQFiQCbZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5State6__initZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5State9__xtoHashFNbNeKxSQFnQFmQFjQFd__TQExTaTQEjZQFhQCaZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5State__T8popStateTCQFjQFiQFfQEz__TQEtTaTQEfZQFdZQBoMFNaNbNiNeQBrZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5atEndMFNaNdNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5dupToMFNeCQFaQEzQEwQDa__T7MatcherTaZQlAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5matchMFNaNeASQFdQFcQEzQDd__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh5rearmMFNeIAaZCQFeQFdQFaQDe__T7MatcherTaZQl@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh6__ctorMFNaNbNeCQFfQFeQFbQEv__TQEpTaTQCuZQEzmmkQElZCQGpQGoQGlQGf__TQFzTaTQFlZQGj@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh6__ctorMFNaNbNeCQFfQFeQFbQEv__TQEpTaTQEbZQEzmmkQElZQBk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh6__initZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh6__vtblZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh6finishMFNaNbNiNePxSQFjQFiQFfQEz__T6ThreadTmZQkASQGmQGlQGiQEm__T5GroupTmZQjiZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh7__ClassZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh7atStartMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh7patternMFNcNdNfZxSQFiQFhQFeQDi__T5RegexTaZQj@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh7recycleMFNaNbNiNeKSQFjQFiQFfQEz__T10ThreadListTmZQpZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh7recycleMFNaNbNiNePSQFjQFiQFfQEz__T6ThreadTmZQkZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh8allocateMFNaNbNiNeZPSQFlQFkQFhQFb__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh8refCountMFNcNdNfZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh__T10bwdMatcherZQnMFNaNemmkmZCQFtQFsQFpQFj__TQFdTaTQDiZQFn@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh__T10fwdMatcherZQnMFNaNbNemmkmZCQFvQFuQFrQFl__TQFfTaTQErZQFp@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh__T4evalVbi0ZQkMFNaNePSQFmQFlQFiQFc__TQEwTaTQEiZQFg5StateZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh__T4evalVbi1ZQkMFNaNePSQFmQFlQFiQFc__TQEwTaTQEiZQFg5StateZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T14BackLooperImplTSQDhQDgQDdQBh__T5InputTaZQjZQBtZQDh__T9matchImplVbi0ZQpMFNaNeASQFrQFqQFnQDr__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw11createStartMFNaNbNiNemkZPSQEgQEfQEcQDw__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw12matchOneShotMFNaNeASQEaQDzQDwQCa__T5GroupTmZQjkZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw13getThreadSizeFNaNbNiNeKxSQEfQEeQEbQCf__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw13initialMemoryFNaNbNiNeKxSQEfQEeQEbQCf__T5RegexTaZQjZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw15prepareFreeListMFNaNbNiNemKAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw18initExternalMemoryMFNaNeAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw4forkMFNaNbNiNePSQDvQDuQDrQDl__T6ThreadTmZQkkkZQBg@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw4nextMFNaNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5State11__xopEqualsMxFKxSQEdQEcQDzQDt__TQDnTaTQCzZQDxQCbZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5State6__initZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5State9__xtoHashFNbNeKxSQEcQEbQDyQDs__TQDmTaTQCyZQDwQCaZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5State__T8popStateTCQDyQDxQDuQDo__TQDiTaTQCuZQDsZQBoMFNaNbNiNeQBrZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5atEndMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5dupToMFNeCQDpQDoQDlQBp__T7MatcherTaZQlAvZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5matchMFNaNeASQDsQDrQDoQBs__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5rearmMFNeIAaZCQDtQDsQDpQBt__T7MatcherTaZQl@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6__ctorMFNaNbNeCQDuQDtQDqQDk__TQDeTaTQCqZQDommkQDaZQBk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6__ctorMFNaNbNeCQDuQDtQDqQDk__TQDeTaTSQEqQEpQEmQCq__T14BackLooperImplTQDxZQvZQEymmkQEkZCQGoQGnQGkQGe__TQFyTaTQFkZQGi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6__initZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6__vtblZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6finishMFNaNbNiNePxSQDyQDxQDuQDo__T6ThreadTmZQkASQFbQFaQExQDb__T5GroupTmZQjiZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw6searchMFNaNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw7__ClassZ@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw7atStartMFNaNbNdNiNeZb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw7patternMFNcNdNfZxSQDxQDwQDtQBx__T5RegexTaZQj@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw7recycleMFNaNbNiNeKSQDyQDxQDuQDo__T10ThreadListTmZQpZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw7recycleMFNaNbNiNePSQDyQDxQDuQDo__T6ThreadTmZQkZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw8allocateMFNaNbNiNeZPSQEaQDzQDwQDq__T6ThreadTmZQk@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw8refCountMFNcNdNfZm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T10bwdMatcherZQnMFNaNemmkmZCQEiQEhQEeQDy__TQDsTaTSQFeQFdQFaQDe__T14BackLooperImplTQElZQvZQFm@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T10fwdMatcherZQnMFNaNbNemmkmZCQEkQEjQEgQEa__TQDuTaTQDgZQEe@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T4evalVbi0ZQkMFNaNePSQEbQEaQDxQDr__TQDlTaTQCxZQDv5StateZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T4evalVbi1ZQkMFNaNePSQEbQEaQDxQDr__TQDlTaTQCxZQDv5StateZv@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T6__ctorZQiMFNaNeKxSQEaQDzQDwQCa__T5RegexTaZQjQDbAvZCQFhQFgQFdQEx__TQErTaTQEdZQFb@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T9matchImplVbi0ZQpMFNaNeASQEgQEfQEcQCg__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw__T9matchImplVbi1ZQpMFNaNeASQEgQEfQEcQCg__T5GroupTmZQjZi@Base 12
+ _D3std5regex8internal8thompson__T6ThreadTmZQk6__initZ@Base 12
+ _D3std5regex8internal9generator11__moduleRefZ@Base 12
+ _D3std5regex8internal9generator12__ModuleInfoZ@Base 12
+ _D3std5regex8internal9kickstart11__moduleRefZ@Base 12
+ _D3std5regex8internal9kickstart12__ModuleInfoZ@Base 12
+ _D3std5regex8internal9kickstart__T13effectiveSizeTaZQsFNaNbNiNfZk@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread10setInvMaskMFNaNbNiNfkkZv@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread11__xopEqualsMxFKxSQCzQCyQCvQCp__TQCiTaZQCoQCeZb@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread3addMFNaNfwZv@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread4fullMFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread6__ctorMFNaNbNcNiNfkkAkZSQDfQDeQDbQCv__TQCoTaZQCuQCk@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread6__initZ@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread7advanceMFNaNbNiNfkZv@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread7setMaskMFNaNbNiNfkkZv@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread9__xtoHashFNbNeKxSQCyQCxQCuQCo__TQChTaZQCnQCdZm@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread__T3setS_DQCqQCpQCmQCg__TQBzTaZQCfQBv10setInvMaskMFNaNbNiNfkkZvZQCjMFNaNfwZv@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl11__xopEqualsMxFKxSQCmQClQCiQCc__TQBvTaZQCbZb@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl4forkFNaNbNiNfSQCiQChQCeQBy__TQBrTaZQBx11ShiftThreadkkZQBp@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl5fetchFNbNeKASQChQCgQCdQBx__TQBqTaZQBw11ShiftThreadZQBn@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl6__ctorMFNcNeKSQCiQChQCe2ir__T5RegexTaZQjAkZ10codeBoundsyAi@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl6__ctorMFNcNeKSQCiQChQCe2ir__T5RegexTaZQjAkZSQDmQDlQDiQDc__TQCvTaZQDb@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl6__initZ@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl6lengthMxFNaNbNdNiNfZk@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl6searchMxFNaNeAxamZm@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl7charLenFNaNbNiNfkZk@Base 12
+ _D3std5regex8internal9kickstart__T7ShiftOrTaZQl9__xtoHashFNbNeKxSQClQCkQChQCb__TQBuTaZQCaZm@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq10__aggrDtorMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq10__postblitMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq14__aggrPostblitMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq3hitMFNaNbNdNiNeZQw@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq3preMFNaNbNdNiNeZQw@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq4postMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq4saveMFNbNeZSQBsQBr__TQBoTQBfZQBw@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq5emptyMxFNaNbNdNiNeZb@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq5frontMNgFNaNbNdNiNeZNgSQCdQCc__T8CapturesTQBwZQo@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq6__dtorMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq6__initZ@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq8capturesMNgFNaNbNdNiNeZNgSQCgQCf__T8CapturesTQBzZQo@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq8opAssignMFNcNjNeSQBxQBw__TQBtTQBkZQCbZQw@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq8popFrontMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq9__xtoHashFNbNeKxSQBxQBw__TQBtTQBkZQCbZm@Base 12
+ _D3std5regex__T10RegexMatchTAaZQq__T6__ctorTSQBrQBq8internal2ir__T5RegexTaZQjZQBqMFNcNeQChQBuZSQDpQDo__TQDlTQDcZQDt@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr10__aggrDtorMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr10__postblitMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr14__aggrPostblitMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr3hitMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr3preMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr4postMFNaNbNdNiNeZQy@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr4saveMFNbNeZSQBtQBs__TQBpTQBgZQBx@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr5emptyMxFNaNbNdNiNeZb@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr5frontMNgFNaNbNdNiNeZNgSQCeQCd__T8CapturesTQBxZQo@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr6__dtorMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr6__initZ@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr8capturesMNgFNaNbNdNiNeZNgSQChQCg__T8CapturesTQCaZQo@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr8opAssignMFNcNjNeSQByQBx__TQBuTQBlZQCcZQw@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr8popFrontMFNeZv@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr9__xtoHashFNbNeKxSQByQBx__TQBuTQBlZQCcZm@Base 12
+ _D3std5regex__T10RegexMatchTAxaZQr__T6__ctorTSQBsQBr8internal2ir__T5RegexTaZQjZQBqMFNcNeQCiQBuZSQDqQDp__TQDmTQDdZQDu@Base 12
+ _D3std5regex__T5matchTAaTSQyQw8internal2ir__T5RegexTaZQjZQBqFNfQBpQBpZSQCrQCq__T10RegexMatchTQCtZQr@Base 12
+ _D3std5regex__T5matchTAxaTSQzQx8internal2ir__T5RegexTaZQjZQBrFNfQBqQBpZSQCsQCr__T10RegexMatchTQCuZQr@Base 12
+ _D3std5regex__T8CapturesTAaZQn11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAaZQn12whichPatternMxFNaNbNdNiNfZi@Base 12
+ _D3std5regex__T8CapturesTAaZQn15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std5regex__T8CapturesTAaZQn3hitMFNaNbNdNiNeZQw@Base 12
+ _D3std5regex__T8CapturesTAaZQn3preMFNaNbNdNiNeZQw@Base 12
+ _D3std5regex__T8CapturesTAaZQn4backMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T8CapturesTAaZQn4postMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T8CapturesTAaZQn5emptyMxFNaNbNdNiNeZb@Base 12
+ _D3std5regex__T8CapturesTAaZQn5frontMFNaNbNdNiNeZQy@Base 12
+ _D3std5regex__T8CapturesTAaZQn6__ctorMFNaNbNcNiNeQykAxSQCbQCa8internal2ir10NamedGroupZSQDhQDg__TQDdTQCxZQDl@Base 12
+ _D3std5regex__T8CapturesTAaZQn6__ctorMFNcNeKSQBrQBq__T10RegexMatchTQBqZQrZSQCvQCu__TQCrTQClZQCz@Base 12
+ _D3std5regex__T8CapturesTAaZQn6__initZ@Base 12
+ _D3std5regex__T8CapturesTAaZQn6lengthMxFNaNbNdNiNeZm@Base 12
+ _D3std5regex__T8CapturesTAaZQn7popBackMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAaZQn8capturesMFNaNbNcNdNiNeZSQCbQCa__TQBxTQBrZQCf@Base 12
+ _D3std5regex__T8CapturesTAaZQn8getMatchMNgFNaNbNiNemZNgAa@Base 12
+ _D3std5regex__T8CapturesTAaZQn8opAssignMFNaNbNcNiNjNeSQCaQBz__TQBwTQBqZQCeZQw@Base 12
+ _D3std5regex__T8CapturesTAaZQn8popFrontMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAaZQn9__xtoHashFNbNeKxSQBuQBt__TQBqTQBkZQByZm@Base 12
+ _D3std5regex__T8CapturesTAaZQn__T7opIndexZQjMNgFNaNbNemZNgAa@Base 12
+ _D3std5regex__T8CapturesTAxaZQo11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAxaZQo12whichPatternMxFNaNbNdNiNfZi@Base 12
+ _D3std5regex__T8CapturesTAxaZQo15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std5regex__T8CapturesTAxaZQo3hitMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T8CapturesTAxaZQo3preMFNaNbNdNiNeZQx@Base 12
+ _D3std5regex__T8CapturesTAxaZQo4backMFNaNbNdNiNeZQy@Base 12
+ _D3std5regex__T8CapturesTAxaZQo4postMFNaNbNdNiNeZQy@Base 12
+ _D3std5regex__T8CapturesTAxaZQo5emptyMxFNaNbNdNiNeZb@Base 12
+ _D3std5regex__T8CapturesTAxaZQo5frontMFNaNbNdNiNeZQz@Base 12
+ _D3std5regex__T8CapturesTAxaZQo6__ctorMFNaNbNcNiNeQzkAxSQCcQCb8internal2ir10NamedGroupZSQDiQDh__TQDeTQCyZQDm@Base 12
+ _D3std5regex__T8CapturesTAxaZQo6__ctorMFNcNeKSQBsQBr__T10RegexMatchTQBrZQrZSQCwQCv__TQCsTQCmZQDa@Base 12
+ _D3std5regex__T8CapturesTAxaZQo6__initZ@Base 12
+ _D3std5regex__T8CapturesTAxaZQo6lengthMxFNaNbNdNiNeZm@Base 12
+ _D3std5regex__T8CapturesTAxaZQo7popBackMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAxaZQo8capturesMFNaNbNcNdNiNeZSQCcQCb__TQByTQBsZQCg@Base 12
+ _D3std5regex__T8CapturesTAxaZQo8getMatchMNgFNaNbNiNemZNgANgxa@Base 12
+ _D3std5regex__T8CapturesTAxaZQo8opAssignMFNaNbNcNiNjNeSQCbQCa__TQBxTQBrZQCfZQw@Base 12
+ _D3std5regex__T8CapturesTAxaZQo8popFrontMFNaNbNiNeZv@Base 12
+ _D3std5regex__T8CapturesTAxaZQo9__xtoHashFNbNeKxSQBvQBu__TQBrTQBlZQBzZm@Base 12
+ _D3std5regex__T8CapturesTAxaZQo__T7opIndexZQjMNgFNaNbNemZNgANgxa@Base 12
+ _D3std5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQBqQBp8internal2ir__T5RegexTaZQj@Base 12
+ _D3std5regex__TQjHTAyaTyaZQuFNexAAyaAxaZSQBnQBm8internal2ir__T5RegexTaZQj@Base 12
+ _D3std5regex__TQjTAyaZQqFNeQjAxaZSQBgQBf8internal2ir__T5RegexTaZQj@Base 12
+ _D3std5stdio10ChunksImpl11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio10ChunksImpl11__xopEqualsMxFKxSQBpQBoQBlZb@Base 12
+ _D3std5stdio10ChunksImpl15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio10ChunksImpl6__ctorMFNcSQBiQBh4FilemZSQBwQBvQBs@Base 12
+ _D3std5stdio10ChunksImpl6__initZ@Base 12
+ _D3std5stdio10ChunksImpl8opAssignMFNcNjNeSQBoQBnQBkZQl@Base 12
+ _D3std5stdio10ChunksImpl9__xtoHashFNbNeKxSQBoQBnQBkZm@Base 12
+ _D3std5stdio10readlnImplFPOS4core4stdcQBg8_IO_FILEKAawEQCbQCa4File11OrientationZ1nm@Base 12
+ _D3std5stdio10readlnImplFPOS4core4stdcQBg8_IO_FILEKAawEQCbQCa4File11OrientationZ7lineptrPa@Base 12
+ _D3std5stdio10readlnImplFPOS4core4stdcQBg8_IO_FILEKAawEQCbQCa4File11OrientationZm@Base 12
+ _D3std5stdio11__moduleRefZ@Base 12
+ _D3std5stdio11openNetworkFAyatZSQBeQBd4File@Base 12
+ _D3std5stdio12__ModuleInfoZ@Base 12
+ _D3std5stdio13StdFileHandle6__initZ@Base 12
+ _D3std5stdio13trustedStdoutFNdNeZSQBgQBf4File@Base 12
+ _D3std5stdio14ReadlnAppender10initializeMFAaZv@Base 12
+ _D3std5stdio14ReadlnAppender11__xopEqualsMxFKxSQBtQBsQBpZb@Base 12
+ _D3std5stdio14ReadlnAppender24reserveWithoutAllocatingMFmZb@Base 12
+ _D3std5stdio14ReadlnAppender4dataMFNdNeZAa@Base 12
+ _D3std5stdio14ReadlnAppender6__initZ@Base 12
+ _D3std5stdio14ReadlnAppender7putcharMFNeaZv@Base 12
+ _D3std5stdio14ReadlnAppender7putonlyMFNeAaZv@Base 12
+ _D3std5stdio14ReadlnAppender7reserveMFNemZv@Base 12
+ _D3std5stdio14ReadlnAppender8putdcharMFNewZv@Base 12
+ _D3std5stdio14ReadlnAppender9__xtoHashFNbNeKxSQBsQBrQBoZm@Base 12
+ _D3std5stdio14StdioException6__ctorMFNeAyakZCQBrQBqQBn@Base 12
+ _D3std5stdio14StdioException6__initZ@Base 12
+ _D3std5stdio14StdioException6__vtblZ@Base 12
+ _D3std5stdio14StdioException6opCallFAyaZv@Base 12
+ _D3std5stdio14StdioException6opCallFZv@Base 12
+ _D3std5stdio14StdioException7__ClassZ@Base 12
+ _D3std5stdio17LockingTextReader10__aggrDtorMFZv@Base 12
+ _D3std5stdio17LockingTextReader10__postblitMFZv@Base 12
+ _D3std5stdio17LockingTextReader11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio17LockingTextReader11__xopEqualsMxFKxSQBwQBvQBsZb@Base 12
+ _D3std5stdio17LockingTextReader14__aggrPostblitMFZv@Base 12
+ _D3std5stdio17LockingTextReader15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio17LockingTextReader5emptyMFNdZb@Base 12
+ _D3std5stdio17LockingTextReader5frontMFNdZa@Base 12
+ _D3std5stdio17LockingTextReader6__ctorMFNcSQBpQBo4FileZSQCcQCbQBy@Base 12
+ _D3std5stdio17LockingTextReader6__dtorMFZv@Base 12
+ _D3std5stdio17LockingTextReader6__initZ@Base 12
+ _D3std5stdio17LockingTextReader8opAssignMFSQBpQBoQBlZv@Base 12
+ _D3std5stdio17LockingTextReader8popFrontMFZv@Base 12
+ _D3std5stdio17LockingTextReader9__xtoHashFNbNeKxSQBvQBuQBrZm@Base 12
+ _D3std5stdio4File10__postblitMFNbNfZv@Base 12
+ _D3std5stdio4File11ByChunkImpl11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio4File11ByChunkImpl11__xopEqualsMxFKxSQBvQBuQBrQBpZb@Base 12
+ _D3std5stdio4File11ByChunkImpl15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio4File11ByChunkImpl5emptyMxFNbNdZb@Base 12
+ _D3std5stdio4File11ByChunkImpl5frontMFNbNdZAh@Base 12
+ _D3std5stdio4File11ByChunkImpl5primeMFZv@Base 12
+ _D3std5stdio4File11ByChunkImpl6__ctorMFNcSQBoQBnQBkAhZSQCbQCaQBxQBv@Base 12
+ _D3std5stdio4File11ByChunkImpl6__ctorMFNcSQBoQBnQBkmZSQCaQBzQBwQBu@Base 12
+ _D3std5stdio4File11ByChunkImpl6__initZ@Base 12
+ _D3std5stdio4File11ByChunkImpl8opAssignMFNcNjNeSQBuQBtQBqQBoZQo@Base 12
+ _D3std5stdio4File11ByChunkImpl8popFrontMFZv@Base 12
+ _D3std5stdio4File11ByChunkImpl9__xtoHashFNbNeKxSQBuQBtQBqQBoZm@Base 12
+ _D3std5stdio4File11__xopEqualsMxFKxSQBiQBhQBeZb@Base 12
+ _D3std5stdio4File12closeHandlesMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter10__aggrDtorMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter10__postblitMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter11__xopEqualsMxFKxSQCbQCaQBxQBvZb@Base 12
+ _D3std5stdio4File17LockingTextWriter14__aggrPostblitMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter26highSurrogateShouldBeEmptyMFNfZv@Base 12
+ _D3std5stdio4File17LockingTextWriter6__ctorMFNcNeKSQBxQBwQBtZSQCiQChQCeQCc@Base 12
+ _D3std5stdio4File17LockingTextWriter6__dtorMFNeZv@Base 12
+ _D3std5stdio4File17LockingTextWriter6__initZ@Base 12
+ _D3std5stdio4File17LockingTextWriter7handle_MFNdNeZPS4core4stdcQCf8_IO_FILE@Base 12
+ _D3std5stdio4File17LockingTextWriter8opAssignMFNcNjNeSQCaQBzQBwQBuZQo@Base 12
+ _D3std5stdio4File17LockingTextWriter9__xtoHashFNbNeKxSQCaQBzQBwQBuZm@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTAaZQiMFNfMQkZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTAwZQiMFNfMQkZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTAxaZQjMFNfMQlZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTAyaZQjMFNfMQlZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTaZQhMFNfaZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTwZQhMFNfwZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTxaZQiMFNfxaZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTxwZQiMFNfxwZv@Base 12
+ _D3std5stdio4File17LockingTextWriter__T3putTyaZQiMFNfyaZv@Base 12
+ _D3std5stdio4File17lockingTextWriterMFNfZSQBoQBnQBk17LockingTextWriter@Base 12
+ _D3std5stdio4File19lockingBinaryWriterMFZSQBoQBnQBk__T16BinaryWriterImplVbi1ZQx@Base 12
+ _D3std5stdio4File3eofMxFNaNdNeZb@Base 12
+ _D3std5stdio4File4Impl6__initZ@Base 12
+ _D3std5stdio4File4lockMFEQxQv8LockTypemmZv@Base 12
+ _D3std5stdio4File4nameMxFNaNbNdNjNfZAya@Base 12
+ _D3std5stdio4File4openMFNeAyaMAxaZv@Base 12
+ _D3std5stdio4File4seekMFNeliZv@Base 12
+ _D3std5stdio4File4sizeMFNdNfZm@Base 12
+ _D3std5stdio4File4syncMFNeZv@Base 12
+ _D3std5stdio4File4tellMxFNdNeZm@Base 12
+ _D3std5stdio4File5closeMFNeZv@Base 12
+ _D3std5stdio4File5errorMxFNaNbNdNeZb@Base 12
+ _D3std5stdio4File5flushMFNeZv@Base 12
+ _D3std5stdio4File5getFPMFNaNfZPOS4core4stdcQBl8_IO_FILE@Base 12
+ _D3std5stdio4File5popenMFNfAyaMAxaZv@Base 12
+ _D3std5stdio4File6__ctorMFNcNePOS4core4stdcQBl8_IO_FILEAyakbZSQCiQChQCe@Base 12
+ _D3std5stdio4File6__ctorMFNcNfAyaMAxaZSQBlQBkQBh@Base 12
+ _D3std5stdio4File6__dtorMFNfZv@Base 12
+ _D3std5stdio4File6__initZ@Base 12
+ _D3std5stdio4File6detachMFNeZv@Base 12
+ _D3std5stdio4File6fdopenMFNeiMAxaAyaZv@Base 12
+ _D3std5stdio4File6fdopenMFNfiMAxaZv@Base 12
+ _D3std5stdio4File6filenoMxFNdNeZi@Base 12
+ _D3std5stdio4File6isOpenMxFNaNbNdNfZb@Base 12
+ _D3std5stdio4File6reopenMFNeAyaMAxaZv@Base 12
+ _D3std5stdio4File6rewindMFNfZv@Base 12
+ _D3std5stdio4File6unlockMFmmZv@Base 12
+ _D3std5stdio4File7byChunkMFAhZSQBdQBcQz11ByChunkImpl@Base 12
+ _D3std5stdio4File7byChunkMFmZSQBcQBbQy11ByChunkImpl@Base 12
+ _D3std5stdio4File7setvbufMFNeAviZv@Base 12
+ _D3std5stdio4File7setvbufMFNemiZv@Base 12
+ _D3std5stdio4File7tmpfileFNfZSQBcQBbQy@Base 12
+ _D3std5stdio4File7tryLockMFEQBaQz8LockTypemmZb@Base 12
+ _D3std5stdio4File8clearerrMFNaNbNfZv@Base 12
+ _D3std5stdio4File8initImplMFPOS4core4stdcQBj8_IO_FILEAyakbZv@Base 12
+ _D3std5stdio4File8lockImplMFismmZi@Base 12
+ _D3std5stdio4File8opAssignMFNcNjNfSQBhQBgQBdZQl@Base 12
+ _D3std5stdio4File8wrapFileFNfPOS4core4stdcQBk8_IO_FILEZSQCcQCbQBy@Base 12
+ _D3std5stdio4File9__xtoHashFNbNeKxSQBhQBgQBdZm@Base 12
+ _D3std5stdio4File9resetFileMFNeAyaMAxabZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx10__aggrDtorMFZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx10__postblitMFNbNiZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx11__xopEqualsMxFKxSQCkQCjQCg__TQCeVbi1ZQCmZb@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx14__aggrPostblitMFNbZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx6__ctorMFNcMKSQCfQCeQCbZSQCqQCpQCm__TQCkVbi1ZQCs@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx6__dtorMFNbNiZv@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx6__initZ@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx8opAssignMFNcNjSQChQCgQCd__TQCbVbi1ZQCjZQz@Base 12
+ _D3std5stdio4File__T16BinaryWriterImplVbi1ZQx9__xtoHashFNbNeKxSQCjQCiQCf__TQCdVbi1ZQClZm@Base 12
+ _D3std5stdio4File__T6readlnTAyaZQmMFwZQk@Base 12
+ _D3std5stdio4File__T6readlnTaZQkMFKAawZm@Base 12
+ _D3std5stdio4File__T7rawReadTaZQlMFNfAaZQd@Base 12
+ _D3std5stdio4File__T7rawReadTbZQlMFNfAbZQd@Base 12
+ _D3std5stdio4File__T7rawReadThZQlMFNfAhZQd@Base 12
+ _D3std5stdio4File__T7rawReadTiZQlMFNfAiZQd@Base 12
+ _D3std5stdio4File__T7rawReadTlZQlMFNfAlZQd@Base 12
+ _D3std5stdio4File__T8writeflnTaTAyaTmTQgTQjTxlZQBbMFNfIAaQzmQBcQBfxlZv@Base 12
+ _D3std5stdio4File__T8writeflnTaTAyaTmTQgTQjTxmZQBbMFNfIAaQzmQBcQBfxmZv@Base 12
+ _D3std5stdio4File__T8writeflnTaTAyaTmTQgTxmZQyMFNfIAaQvmQyxmZv@Base 12
+ _D3std5stdio4File__T8writeflnTaTAyaTxmTQhTQkTmZQBbMFNfIAaQzxmQBdQBgmZv@Base 12
+ _D3std5stdio4File__T8writeflnTaTAyaTxmTQhTxmZQzMFNfIAaQwxmQBaxmZv@Base 12
+ _D3std5stdio5lines11__fieldDtorMFNeZv@Base 12
+ _D3std5stdio5lines11__xopEqualsMxFKxSQBjQBiQBfZb@Base 12
+ _D3std5stdio5lines15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std5stdio5lines6__ctorMFNcSQBcQBb4FilewZSQBqQBpQBm@Base 12
+ _D3std5stdio5lines6__initZ@Base 12
+ _D3std5stdio5lines8opAssignMFNcNjNeSQBiQBhQBeZQl@Base 12
+ _D3std5stdio5lines9__xtoHashFNbNeKxSQBiQBhQBeZm@Base 12
+ _D3std5stdio6chunksFSQtQr4FilemZSQBfQBe10ChunksImpl@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea21_636f72652e737464632e737464696f2e737464696eZQDeFNbNcNdNiZSQEfQEe4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea21_636f72652e737464632e737464696f2e737464696eZQDeFNcNdZ4implSQEgQEf4File4Impl@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea21_636f72652e737464632e737464696f2e737464696eZQDeFNcNdZ6resultSQEiQEh4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea21_636f72652e737464632e737464696f2e737464696eZQDeFNcNdZ8spinlockOk@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e737464657272ZQDgFNbNcNdNiZSQEhQEg4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e737464657272ZQDgFNcNdZ4implSQEiQEh4File4Impl@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e737464657272ZQDgFNcNdZ6resultSQEkQEj4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e737464657272ZQDgFNcNdZ8spinlockOk@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e7374646f7574ZQDgFNbNcNdNiZSQEhQEg4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e7374646f7574ZQDgFNcNdZ4implSQEiQEh4File4Impl@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e7374646f7574ZQDgFNcNdZ6resultSQEkQEj4File@Base 12
+ _D3std5stdio__T10makeGlobalVEQBbQBa13StdFileHandlea22_636f72652e737464632e737464696f2e7374646f7574ZQDgFNcNdZ8spinlockOk@Base 12
+ _D3std5stdio__T12trustedFreadTaZQrFNbNiNePOS4core4stdcQBw8_IO_FILEAaZm@Base 12
+ _D3std5stdio__T12trustedFreadTbZQrFNbNiNePOS4core4stdcQBw8_IO_FILEAbZm@Base 12
+ _D3std5stdio__T12trustedFreadThZQrFNbNiNePOS4core4stdcQBw8_IO_FILEAhZm@Base 12
+ _D3std5stdio__T12trustedFreadTiZQrFNbNiNePOS4core4stdcQBw8_IO_FILEAiZm@Base 12
+ _D3std5stdio__T12trustedFreadTlZQrFNbNiNePOS4core4stdcQBw8_IO_FILEAlZm@Base 12
+ _D3std5stdio__T13trustedFwriteTaZQsFNbNiNePOS4core4stdcQBx8_IO_FILExAaZm@Base 12
+ _D3std5stdio__T6_fopenTAyaTAxaZQqFNbNiNfQrQpZPOS4core4stdcQCa8_IO_FILE@Base 12
+ _D3std5stdio__T6_fopenTAyaTAxaZQqFQlQjZ10_fopenImplFNbNiNeMPxaMQeZPOS4core4stdcQCv8_IO_FILE@Base 12
+ _D3std5stdio__T6_popenTAyaTAxaZQqFNbNiNeQrQpZ9popenImplFNbNiNePxaQdZPOS4core4stdcQCx8_IO_FILE@Base 12
+ _D3std5stdio__T6_popenTAyaTAxaZQqFNbNiNeQrQpZPOS4core4stdcQCa8_IO_FILE@Base 12
+ _D3std6base6411__moduleRefZ@Base 12
+ _D3std6base6412__ModuleInfoZ@Base 12
+ _D3std6base6415Base64Exception6__ctorMFNaNbNfAyaQdmZCQBzQByQBu@Base 12
+ _D3std6base6415Base64Exception6__initZ@Base 12
+ _D3std6base6415Base64Exception6__vtblZ@Base 12
+ _D3std6base6415Base64Exception7__ClassZ@Base 12
+ _D3std6base64__T10Base64ImplVai43Vai47Vai61Z12decodeLengthFNaNbNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai43Vai47Vai61Z12encodeLengthFNaNbNiNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai43Vai47Vai61Z9DecodeMapyG256i@Base 12
+ _D3std6base64__T10Base64ImplVai43Vai47Vai61Z9EncodeMapyAa@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai0Z12decodeLengthFNaNbNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai0Z12encodeLengthFNaNbNiNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai0Z9DecodeMapyG256i@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai0Z9EncodeMapyAa@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai61Z12decodeLengthFNaNbNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai61Z12encodeLengthFNaNbNiNfImZm@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai61Z9DecodeMapyG256i@Base 12
+ _D3std6base64__T10Base64ImplVai45Vai95Vai61Z9EncodeMapyAa@Base 12
+ _D3std6bigint11__moduleRefZ@Base 12
+ _D3std6bigint12__ModuleInfoZ@Base 12
+ _D3std6bigint15toDecimalStringFNaNbNfxSQBlQBk6BigIntZAya@Base 12
+ _D3std6bigint5toHexFNfxSQwQu6BigIntZAya@Base 12
+ _D3std6bigint6BigInt10uintLengthMxFNaNbNdNiNfZm@Base 12
+ _D3std6bigint6BigInt11__xopEqualsMxFKxSQBlQBkQBgZb@Base 12
+ _D3std6bigint6BigInt11ulongLengthMxFNaNbNdNiNfZm@Base 12
+ _D3std6bigint6BigInt14checkDivByZeroMxFNaNbNlNfZv@Base 12
+ _D3std6bigint6BigInt5opCmpMxFNaNbNiNfKxSQBmQBlQBhZi@Base 12
+ _D3std6bigint6BigInt5toIntMxFNaNbNiNfZi@Base 12
+ _D3std6bigint6BigInt6__initZ@Base 12
+ _D3std6bigint6BigInt6isZeroMxFNaNbNiNlNfZb@Base 12
+ _D3std6bigint6BigInt6negateMFNaNbNiNlNfZv@Base 12
+ _D3std6bigint6BigInt6toHashMxFNaNbNiNfZm@Base 12
+ _D3std6bigint6BigInt6toLongMxFNaNbNiNfZl@Base 12
+ _D3std6bigint6BigInt8toStringMxFMDFMAxaZvAyaZv@Base 12
+ _D3std6bigint6BigInt8toStringMxFMDFMAxaZvMKxSQBr6format4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6bigint6BigInt__T10opOpAssignVAyaa1_25TSQBsQBrQBnZQBhMFNaNbNjNfQyZQBb@Base 12
+ _D3std6bigint6BigInt__T10opOpAssignVAyaa1_2aTSQBsQBrQBnZQBhMFNaNbNjNfQyZQBb@Base 12
+ _D3std6bigint6BigInt__T10opOpAssignVAyaa2_3e3eTiZQBaMFNaNbNjNfiZSQClQCkQCg@Base 12
+ _D3std6bigint6BigInt__T5opCmpHTSQBeQBdQzZQsMxFNaNbNiNfxSQCcQCbQBxZi@Base 12
+ _D3std6bigint6BigInt__T6__ctorTiZQkMFNaNbNcNfiZSQBuQBtQBp@Base 12
+ _D3std6bigint6BigInt__T6opCastHTbZQlMxFNaNbNiNfZb@Base 12
+ _D3std6bigint6BigInt__T8opAssignHTSQBhQBgQBcZQwMFNaNbNiNfQxZQBa@Base 12
+ _D3std6bigint6BigInt__T8opAssignTiZQmMFNaNbNfiZSQBuQBtQBp@Base 12
+ _D3std6bigint6BigInt__T8opBinaryVAyaa1_25TSQBpQBoQBkZQBeMxFNaNbNjNfQzZQBc@Base 12
+ _D3std6bigint6BigInt__T8opBinaryVAyaa1_2aTSQBpQBoQBkZQBeMxFNaNbNjNfQzZQBc@Base 12
+ _D3std6bigint6BigInt__T8opEqualsZQkMxFNaNbNiNfKxSQBvQBuQBqZb@Base 12
+ _D3std6bigint6BigInt__T8toStringTDFMAxaZvZQtMxFMKQqAyaZv@Base 12
+ _D3std6bigint6BigInt__T8toStringTDFMAxaZvZQtMxFMKQqMKxSQCb6format4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6bigint6BigInt__T8toStringTSQBg5array__T8AppenderTAyaZQoZQBoMxFNaNfMKQBqMKxSQDc6format4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6bigint6BigInt__T8toStringTSQBg5array__T8AppenderTAyaZQoZQBoMxFNaNfMKQBqQwZv@Base 12
+ _D3std6bigint6divModFNaNbNfxSQBbQBa6BigIntxQpJSQBtQBsQsJQkZv@Base 12
+ _D3std6bigint6powmodFNaNbNfSQBaQz6BigIntQnQpZQs@Base 12
+ _D3std6bigint__T9absUnsignTiZQnFNaNbNiNfiZk@Base 12
+ _D3std6digest10murmurhash11__moduleRefZ@Base 12
+ _D3std6digest10murmurhash12__ModuleInfoZ@Base 12
+ _D3std6digest11__moduleRefZ@Base 12
+ _D3std6digest12__ModuleInfoZ@Base 12
+ _D3std6digest2md11__moduleRefZ@Base 12
+ _D3std6digest2md12__ModuleInfoZ@Base 12
+ _D3std6digest2md3MD51FFNaNbNiNfkkkZk@Base 12
+ _D3std6digest2md3MD51GFNaNbNiNfkkkZk@Base 12
+ _D3std6digest2md3MD51HFNaNbNiNfkkkZk@Base 12
+ _D3std6digest2md3MD51IFNaNbNiNfkkkZk@Base 12
+ _D3std6digest2md3MD52FFFNaNbNiNfKkkkkkkkZv@Base 12
+ _D3std6digest2md3MD52GGFNaNbNiNfKkkkkkkkZv@Base 12
+ _D3std6digest2md3MD52HHFNaNbNiNfKkkkkkkkZv@Base 12
+ _D3std6digest2md3MD52IIFNaNbNiNfKkkkkkkkZv@Base 12
+ _D3std6digest2md3MD53putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest2md3MD55startMFNaNbNiNfZv@Base 12
+ _D3std6digest2md3MD56__initZ@Base 12
+ _D3std6digest2md3MD56finishMFNaNbNiNeZG16h@Base 12
+ _D3std6digest2md3MD58_paddingyG64h@Base 12
+ _D3std6digest2md3MD59transformMFNaNbNiPxG64hZv@Base 12
+ _D3std6digest3crc11__moduleRefZ@Base 12
+ _D3std6digest3crc12__ModuleInfoZ@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx4peekMxFNaNbNiNfZG4h@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx6__initZ@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx6finishMFNaNbNiNfZG4h@Base 12
+ _D3std6digest3crc__T3CRCVki32Vmi3988292384ZQx6tablesyG8G256k@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg4peekMxFNaNbNiNfZG8h@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg6__initZ@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg6finishMFNaNbNiNfZG8h@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg6tablesyG8G256m@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg4peekMxFNaNbNiNfZG8h@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg6__initZ@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg6finishMFNaNbNiNfZG8h@Base 12
+ _D3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg6tablesyG8G256m@Base 12
+ _D3std6digest3crc__T9genTablesTkZQnFNaNbNiNfkZG8G256k@Base 12
+ _D3std6digest3crc__T9genTablesTmZQnFNaNbNiNfmZG8G256m@Base 12
+ _D3std6digest3sha11__moduleRefZ@Base 12
+ _D3std6digest3sha12__ModuleInfoZ@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs6finishMFNaNbNiNeZG28h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs9constantsyG80m@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs__T12T_SHA2_16_79TmZQrFNaNbNiNfiKG16mmmmKmmmmKmmZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki224ZQs__T13transformSHA2TmZQsFNaNbNiPG8mPxG128hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs6finishMFNaNbNiNeZG32h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs9constantsyG80m@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs__T12T_SHA2_16_79TmZQrFNaNbNiNfiKG16mmmmKmmmmKmmZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki256ZQs__T13transformSHA2TmZQsFNaNbNiPG8mPxG128hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs6finishMFNaNbNiNeZG48h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs9constantsyG80m@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs__T12T_SHA2_16_79TmZQrFNaNbNiNfiKG16mmmmKmmmmKmmZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki384ZQs__T13transformSHA2TmZQsFNaNbNiPG8mPxG128hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs6finishMFNaNbNiNeZG64h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs9constantsyG80m@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs__T12T_SHA2_16_79TmZQrFNaNbNiNfiKG16mmmmKmmmmKmmZv@Base 12
+ _D3std6digest3sha__T3SHAVki1024Vki512ZQs__T13transformSHA2TmZQsFNaNbNiPG8mPxG128hZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr6finishMFNaNbNiNeZG20h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki160ZQr9constantsyG64k@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr6finishMFNaNbNiNeZG28h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr9constantsyG64k@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr__T12T_SHA2_16_79TkZQrFNaNbNiNfiKG16kkkkKkkkkKkkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki224ZQr__T13transformSHA2TkZQsFNaNbNiPG8kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr12transformX86FNaNbNiPG5kPxG64hZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr3putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr5startMFNaNbNiNfZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr6T_0_15FNaNbNiiPxG64hKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr6__initZ@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr6finishMFNaNbNiNeZG32h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr7T_16_19FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr7T_20_39FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr7T_40_59FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr7T_60_79FNaNbNiNfiKG16kkKkkkkKkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr7paddingyG128h@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr9constantsyG64k@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr__T12T_SHA2_16_79TkZQrFNaNbNiNfiKG16kkkkKkkkkKkkZv@Base 12
+ _D3std6digest3sha__T3SHAVki512Vki256ZQr__T13transformSHA2TkZQsFNaNbNiPG8kPxG64hZv@Base 12
+ _D3std6digest4hmac11__moduleRefZ@Base 12
+ _D3std6digest4hmac12__ModuleInfoZ@Base 12
+ _D3std6digest6Digest11__InterfaceZ@Base 12
+ _D3std6digest6DigestQoMFNbNeMAxAvXAh@Base 12
+ _D3std6digest6ripemd11__moduleRefZ@Base 12
+ _D3std6digest6ripemd12__ModuleInfoZ@Base 12
+ _D3std6digest6ripemd9RIPEMD1601FFNaNbNiNfkkkZk@Base 12
+ _D3std6digest6ripemd9RIPEMD1601GFNaNbNiNfkkkZk@Base 12
+ _D3std6digest6ripemd9RIPEMD1601HFNaNbNiNfkkkZk@Base 12
+ _D3std6digest6ripemd9RIPEMD1601IFNaNbNiNfkkkZk@Base 12
+ _D3std6digest6ripemd9RIPEMD1601JFNaNbNiNfkkkZk@Base 12
+ _D3std6digest6ripemd9RIPEMD1602FFFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1602GGFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1602HHFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1602IIFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1602JJFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603FFFFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603GGGFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603HHHFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603IIIFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603JJJFNaNbNiNfKkkKkkkkkZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1603putMFNaNbNiNeMAxhXv@Base 12
+ _D3std6digest6ripemd9RIPEMD1605startMFNaNbNiNfZv@Base 12
+ _D3std6digest6ripemd9RIPEMD1606__initZ@Base 12
+ _D3std6digest6ripemd9RIPEMD1606finishMFNaNbNiNeZG20h@Base 12
+ _D3std6digest6ripemd9RIPEMD1608_paddingyG64h@Base 12
+ _D3std6digest6ripemd9RIPEMD1609transformMFNaNbNiPxG64hZv@Base 12
+ _D3std6digestQh11__moduleRefZ@Base 12
+ _D3std6digestQh12__ModuleInfoZ@Base 12
+ _D3std6digest__T11toHexStringVEQBdQBc5Orderi1VEQBt5ascii10LetterCasei0ZQCdFNaNbNfIAhZAya@Base 12
+ _D3std6digest__T11toHexStringVEQBdQBc5Orderi1Vmi16VEQBy5ascii10LetterCasei0ZQCiFNaNbNiNfxG16hZG32a@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6__ctorMFNaNbNiNfZCQCpQCo__TQCkTQByZQCs@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe4peekMxFNaNbNeAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe4peekMxFNaNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6__ctorMFNaNbNiNfZCQDoQDn__TQDjTQCxZQDr@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo4peekMxFNaNbNeAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo4peekMxFNaNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6__ctorMFNaNbNiNfZCQDyQDx__TQDtTQDhZQEb@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo4peekMxFNaNbNeAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo4peekMxFNaNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6__ctorMFNaNbNiNfZCQDyQDx__TQDtTQDhZQEb@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6__ctorMFNaNbNiNfZCQDjQDi__TQDeTQCsZQDm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6__ctorMFNaNbNiNfZCQDjQDi__TQDeTQCsZQDm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6__ctorMFNaNbNiNfZCQDjQDi__TQDeTQCsZQDm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6__ctorMFNaNbNiNfZCQDjQDi__TQDeTQCsZQDm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6__ctorMFNaNbNiNfZCQDiQDh__TQDdTQCrZQDl@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6__ctorMFNaNbNiNfZCQDiQDh__TQDdTQCrZQDl@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6__ctorMFNaNbNiNfZCQDiQDh__TQDdTQCrZQDl@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy7__ClassZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp3putMFNbNeMAxhXv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp5resetMFNbNeZv@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6__ctorMFNaNbNiNfZCQCzQCy__TQCuTQCiZQDc@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6__initZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6__vtblZ@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6finishMFNbAhZQd@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6finishMFNbNeZAh@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6lengthMxFNaNbNdNeZm@Base 12
+ _D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp7__ClassZ@Base 12
+ _D3std6digest__T15toHexStringImplVEQBhQBg5Orderi1VEQBx5ascii10LetterCasei0TAxhTAaZQCoFNaNbNiNfMKxAhKQvZv@Base 12
+ _D3std6digest__T15toHexStringImplVEQBhQBg5Orderi1VEQBx5ascii10LetterCasei0TG16hTG32aZQCrFNaNbNiNfKxG16hKQyZv@Base 12
+ _D3std6digest__T7asArrayVmi16ThZQqFNaNbNcNiKAhAyaZG16h@Base 12
+ _D3std6digest__T7asArrayVmi20ThZQqFNaNbNcNiKAhAyaZG20h@Base 12
+ _D3std6digest__T7asArrayVmi28ThZQqFNaNbNcNiKAhAyaZG28h@Base 12
+ _D3std6digest__T7asArrayVmi32ThZQqFNaNbNcNiKAhAyaZG32h@Base 12
+ _D3std6digest__T7asArrayVmi48ThZQqFNaNbNcNiKAhAyaZG48h@Base 12
+ _D3std6digest__T7asArrayVmi4ThZQpFNaNbNcNiKAhAyaZG4h@Base 12
+ _D3std6digest__T7asArrayVmi64ThZQqFNaNbNcNiKAhAyaZG64h@Base 12
+ _D3std6digest__T7asArrayVmi8ThZQpFNaNbNcNiKAhAyaZG8h@Base 12
+ _D3std6format11__moduleRefZ@Base 12
+ _D3std6format12__ModuleInfoZ@Base 12
+ _D3std6format15FormatException6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCtQCsQCo@Base 12
+ _D3std6format15FormatException6__ctorMFNaNbNiNfZCQBvQBuQBq@Base 12
+ _D3std6format15FormatException6__initZ@Base 12
+ _D3std6format15FormatException6__vtblZ@Base 12
+ _D3std6format15FormatException7__ClassZ@Base 12
+ _D3std6format4read11__moduleRefZ@Base 12
+ _D3std6format4read12__ModuleInfoZ@Base 12
+ _D3std6format4spec11__moduleRefZ@Base 12
+ _D3std6format4spec12__ModuleInfoZ@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp11__xopEqualsMxFKxSQCdQCcQBy__TQBwTaZQCcZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp11flSeparatorMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp11flSeparatorMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp12getCurFmtStrMxFNaNfZAya@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp16separatorCharPosMFNaNbNiNfZi@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp16separatorCharPosMFNaNbNiNfiZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6__ctorMFNaNbNcNiNfIAaZSQCiQChQCd__TQCbTaZQCh@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6__initZ@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6fillUpMFNaNlNfZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flDashMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flDashMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flHashMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flHashMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flPlusMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flPlusMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flZeroMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp6flZeroMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp7flEqualMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp7flEqualMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp7flSpaceMFNaNbNdNiNfbZv@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp7flSpaceMxFNaNbNdNiNfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp8toStringMxFNaNfZAya@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp9__xtoHashFNbNeKxSQCcQCbQBx__TQBvTaZQCbZm@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTDFMAxaZvZQBdMFNlKQrZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCi5array__T8AppenderTAyaZQoZQByMFNaNlNfKQBqZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCi5array__T8AppenderTyAaZQoZQByMFNaNlNfKQBqZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCi5stdio4File17LockingTextWriterZQCdMFNlNfKQBtZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCiQCh8NoOpSinkZQBlMFNaNlNfKQBdZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCiQChQCd__T10singleSpecTyaZQqFAyaZ16DummyOutputRangeZQCxMFNaNlNfKQCpZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCiQCh__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkZQCnMFNaNlNfKQCfZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T17writeUpToNextSpecTSQCiQCh__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkZQDbMFNaNlNfKQCtZb@Base 12
+ _D3std6format4spec__T10FormatSpecTaZQp__T8toStringTSQBy5array__T8AppenderTAyaZQoZQBoMxFNaNfKQBpZv@Base 12
+ _D3std6format4spec__T10singleSpecTyaZQqFAyaZ16DummyOutputRange6__initZ@Base 12
+ _D3std6format4spec__T10singleSpecTyaZQqFAyaZ16DummyOutputRange__T3putTaZQhMFNaNbNiNfMxAaZv@Base 12
+ _D3std6format4spec__T10singleSpecTyaZQqFNaNfAyaZSQBvQBuQBq__T10FormatSpecTaZQp@Base 12
+ _D3std6format5write11__moduleRefZ@Base 12
+ _D3std6format5write12__ModuleInfoZ@Base 12
+ _D3std6format5write__T11formatValueTDFNaNbNfAxaZvTxeTaZQBhFNaNfKQBcKxeMKxSQCuQCt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTAaTaZQBxFNaNfKQBsKQsMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTAxaTaZQByFNaNfKQBtKQtMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTAxhTaZQByFNaNfKQBtKQtMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTAyAaTaZQBzFNaNfKQBuKQuMKxSQDmQDl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTC14TypeInfo_ClassTaZQCmFNaNfKQChKQBhMKxSQEaQDz4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTEQCn3net7isemail15EmailStatusCodeTaZQDcFNaNfKQCxKQBxMKxSQEqQEp4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTEQCn8datetime4date5MonthTaZQCtFNaNfKQCoKQBoMKxSQEhQEg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTPSQCo11parallelism12AbstractTaskTaZQDbFNaNfKQCwKQBwMKxSQEpQEo4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTPvTaZQBxFNaNfKQBsKQsMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTPxSQCpQCo4spec__T10FormatSpecTaZQpTaZQDdFNaNfKQCyQBxMKxQCbZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTPxhTaZQByFNaNfKQBtKQtMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTQhTaZQBxFNaNfKQBsKQzMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTSQCn11concurrency3TidTaZQCqFNaNfKQClKQBlMKxSQEeQEd4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTSQCn4path__T16asNormalizedPathTSQDs5range__T5chainTSQEm3utf__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplTSQGmQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtTaZQIjFNaNfKQIeKQHeMKxSQJxQJw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTSQCn5range__T5chainTSQDh3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQFgQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultTaZQGpFNaNfKQGkKQFkMKxSQIdQIc4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTaTaZQBwFNaNfKQBrKaMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTaTaZQBwFNaNfKQBraMKxSQDhQDg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTbTaZQBwFNaNfKQBrKbMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTbTaZQBwFNaNfKQBrbMKxSQDhQDg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoThTaZQBwFNaNfKQBrKhMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTiTaZQBwFNaNfKQBrKiMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTkTaZQBwFNaNfKQBrKkMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTmTaZQBwFNaNfKQBrKmMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTwTaZQBwFNaNfKQBrKwMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTwTaZQBwFNaNfKQBrwMKxSQDhQDg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxAaTaZQByFNaNfKQBtKxQtMKxSQDmQDl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxEQCo8datetime4date5MonthTaZQCuFNaNfKQCpKxQBpMKxSQEjQEi4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxaTaZQBxFNaNfKQBsKxaMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxaTaZQBxFNaNfKQBsxaMKxSQDjQDi4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxhTaZQBxFNaNfKQBsKxhMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxiTaZQBxFNaNfKQBsKxiMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxkTaZQBxFNaNfKQBsKxkMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxmTaZQBxFNaNfKQBsKxmMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxsTaZQBxFNaNfKQBsKxsMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTxtTaZQBxFNaNfKQBsKxtMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTyAaTaZQByFNaNfKQBtKyQtMKxSQDmQDl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTyaTaZQBxFNaNfKQBsKyaMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTAyaZQoTymTaZQBxFNaNfKQBsKymMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTEQCn12experimental6logger4core8LogLevelTaZQDiFNaNfKQDdKQCdMKxSQEwQEv4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTEQCn5regex8internal2ir2IRTaZQCuFNaNfKQCpKQBpMKxSQEiQEh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTEQCn6socket12SocketOptionTaZQCuFNaNfKQCpKQBpMKxSQEiQEh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTkTaZQBwFNaNfKQBrKkMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTwTaZQBwFNaNfKQBrKwMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTwTaZQBwFNaNfKQBrwMKxSQDhQDg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5array__T8AppenderTyAaZQoTyaTaZQBxFNaNfKQBsKyaMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTAxaTaZQCdFNfKQBwKQrMKxSQDoQDn4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTAyaTaZQCdFNfKQBwKQrMKxSQDoQDn4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTEQCs8datetime4date5MonthTaZQCyFNfKQCrKQBmMKxSQEkQEj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterThTaZQCbFNfKQBuKhMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTiTaZQCbFNfKQBuKiMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTkTaZQCbFNfKQBuKkMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTmTaZQCbFNfKQBuKmMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTsTaZQCbFNfKQBuKsMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTwTaZQCbFNfKQBuKwMKxSQDlQDk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTwTaZQCbFNfKQBuwMKxSQDkQDj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTxaTaZQCcFNfKQBvKxaMKxSQDnQDm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTxlTaZQCcFNfKQBvKxlMKxSQDnQDm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTxmTaZQCcFNfKQBvKxmMKxSQDnQDm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBj5stdio4File17LockingTextWriterTyaTaZQCcFNfKQBvKyaMKxSQDnQDm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi8NoOpSinkTmTaZQBjFNaNfKQBeKmMKxSQCvQCu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi8NoOpSinkTxdTaZQBkFNaNfKQBfKxdMKxSQCxQCw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi8NoOpSinkTxhTaZQBkFNaNfKQBfKxhMKxSQCxQCw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi8NoOpSinkTymTaZQBkFNaNfKQBfKymMKxSQCxQCw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTxdTaZQCmFNaNfKQChKxdMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTkTaZQCzFNaNfKQCuKkMKxSQElQEk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T11formatValueTSQBjQBi__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTykTaZQDaFNaNfKQCvKykMKxSQEnQEm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTAaTPvZQCdFNaNfKQBvMxAaQwQvZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTAxaTQeZQCeFNaNfKQBwMxAaQxQzZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTAxhZQCbFNaNfKQBtMxAaQuZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTAyAaZQCcFNaNfKQBuMxAaQvZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTC14TypeInfo_ClassTkTkZQCtFNaNfKQClMxAaQBmkkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTEQCs8datetime4date5MonthZQCwFNaNfKQCoMxAaQBpZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTPvZQCaFNaNfKQBsMxAaQtZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTQmTQpZQCgFNaNfKQByMxAaQBiQBlQBoZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTQmTmZQCfFNaNfKQBxMxAaQBhQBkmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTQmZQCdFNaNfKQBvMxAaQBfQBiZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTkZQCcFNaNfKQBuMxAaQBekZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTxkTQpZQCgFNaNfKQByMxAaQBixkQBnZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTxkTkZQCfFNaNfKQBxMxAaQBhxkkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjTxkZQCdFNaNfKQBvMxAaQBfxkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTQjZQCaFNaNfKQBsMxAaQBcZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTbTQlTQoTEQDa3net7isemail15EmailStatusCodeZQDnFNaNfKQDfMxAabQCqQCtQCfZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTiTEQCu8datetime4date5MonthTiZQDaFNaNfKQCsMxAaiQBsiZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTiTiZQCbFNaNfKQBtMxAaiiZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTiZQBzFNaNfKQBrMxAaiZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTkTkTkZQCdFNaNfKQBvMxAakkkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTkTxkTxkTxkZQCiFNaNfKQCaMxAakxkxkxkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTkZQBzFNaNfKQBrMxAakZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTmTmTymZQCeFNaNfKQBwMxAammymZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTmTmZQCbFNaNfKQBtMxAammZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTmZQBzFNaNfKQBrMxAamZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTwTkTkZQCdFNaNfKQBvMxAawkkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTwTkZQCbFNaNfKQBtMxAawkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTwZQBzFNaNfKQBrMxAawZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxhTxhTxhTxhZQCjFNaNfKQCbMxAaxhxhxhxhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxhTxhTxhZQCgFNaNfKQByMxAaxhxhxhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxkZQCaFNaNfKQBsMxAaxkZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxmTxmZQCdFNaNfKQBvMxAaxmxmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxsTQmTxhZQCgFNaNfKQByMxAaxsQBkxhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxsTxEQCw8datetime4date5MonthTxhZQDdFNaNfKQCvMxAaxsxQBvxhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxsZQCaFNaNfKQBsMxAaxsZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTAyaZQoTaTxtTQmTxtTxtZQCjFNaNfKQCbMxAaxtQBnxtxtZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5array__T8AppenderTyAaZQoTaTkZQBzFNaNfKQBrMxAakZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAxaZQCgFNfQBvMxAaQrZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTQeTiTQjZQCoFNfKQCeMxAaQBaQBdiQBhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTmTQgTQjTxlZQCrFNfKQChMxAaQBdmQBhQBkxlZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTmTQgTQjTxmZQCrFNfKQChMxAaQBdmQBhQBkxmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTmTQgTxmZQCoFNfKQCeMxAaQBamQBexmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTxmTQhTQkTmZQCrFNfKQChMxAaQBdxmQBiQBlmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaTxmTQhTxmZQCpFNfKQCfMxAaQBbxmQBgxmZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTAyaZQCgFNfKQBwMxAaQsZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTkZQCeFNfKQBuMxAakZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTmTAyaTmTQgTmTQlTQoZQCvFNfKQClMxAamQBgmQBkmQBoQBrZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBm5stdio4File17LockingTextWriterTaTsTEQCz8datetime4date5MonthThThThThTxlZQDoFNfKQDeMxAasQBzhhhhxlZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl8NoOpSinkTaTmTmTymZQBrFNaNfQBiMxAammymZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl8NoOpSinkTaTmTmZQBoFNaNfQBfMxAammZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl8NoOpSinkTaTmZQBmFNaNfQBdMxAamZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl8NoOpSinkTaTxdZQBnFNaNfQBeMxAaxdZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl8NoOpSinkTaTxhTxhTxhZQBtFNaNfQBkMxAaxhxhxhZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTaTxdZQCpFNaNfKQChMxAaxdZk@Base 12
+ _D3std6format5write__T14formattedWriteTSQBmQBl__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTaTykTykTkTkTkZQDmFNaNfKQDeMxAaykykkkkZk@Base 12
+ _D3std6format5write__T14formattedWriteVAyaa13_54253032642530326425303264TSQCu5array__T8AppenderTQCfZQoTxhTxhTxhZQDmFNaNfKQBwxhxhxhZk@Base 12
+ _D3std6format5write__T14formattedWriteVAyaa15_20253032643a253032643a25303264TSQCy5array__T8AppenderTQCjZQoTxhTxhTxhZQDqFNaNfKQBwxhxhxhZk@Base 12
+ _D3std6format5write__T14formattedWriteVAyaa15_54253032643a253032643a25303264TSQCy5array__T8AppenderTQCjZQoTxhTxhTxhZQDqFNaNfKQBwxhxhxhZk@Base 12
+ _D3std6format8NoOpSink6__initZ@Base 12
+ _D3std6format8NoOpSink__T3putTAaZQiMFNaNbNiNfMxAaZv@Base 12
+ _D3std6format8NoOpSink__T3putTAxaZQjMFNaNbNiNfMxAaZv@Base 12
+ _D3std6format8NoOpSink__T3putTAyaZQjMFNaNbNiNfMxAyaZv@Base 12
+ _D3std6format8NoOpSink__T3putTaZQhMFNaNbNiNfxaZv@Base 12
+ _D3std6format8NoOpSink__T3putTwZQhMFNaNbNiNfxwZv@Base 12
+ _D3std6format8internal4read11__moduleRefZ@Base 12
+ _D3std6format8internal4read12__ModuleInfoZ@Base 12
+ _D3std6format8internal5write10baseOfSpecFNaNfIaZk@Base 12
+ _D3std6format8internal5write11__moduleRefZ@Base 12
+ _D3std6format8internal5write12__ModuleInfoZ@Base 12
+ _D3std6format8internal5write__T10formatCharTSQBr5array__T8AppenderTAyaZQoZQBrFNaNfKQBnIwIaZv@Base 12
+ _D3std6format8internal5write__T10formatCharTSQBr5array__T8AppenderTyAaZQoZQBrFNaNfKQBnIwIaZv@Base 12
+ _D3std6format8internal5write__T10formatCharTSQBr5stdio4File17LockingTextWriterZQBwFNfKQBqIwIaZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTAxaTaZQByFNaNfKQBtKQtMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTAxhTaZQByFNaNfKQBtKQtMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTAyAaTaZQBzFNaNfKQBuKQuMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTQhTaZQBxFNaNfKQBsKQzMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTSQCw4path__T16asNormalizedPathTSQEb5range__T5chainTSQEv3utf__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplTSQGvQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtTaZQIjFNaNfKQIeKQHeMKxSQKgQKf4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTAyaZQoTSQCw5range__T5chainTSQDq3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQFpQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultTaZQGpFNaNfKQGkKQFkMKxSQImQIl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5array__T8AppenderTyAaZQoTAyaTaZQByFNaNfKQBtKQtMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5stdio4File17LockingTextWriterTAxaTaZQCdFNfKQBwKQrMKxSQDxQDw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T11formatRangeTSQBs5stdio4File17LockingTextWriterTAyaTaZQCdFNfKQBwKQrMKxSQDxQDw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12formatObjectTSQBt5array__T8AppenderTAyaZQoTC14TypeInfo_ClassTaZQCnFNaNbNfKQCjKQBjMKxSQEmQEl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12formatObjectTSQBt5array__T8AppenderTAyaZQoTSQCx11concurrency3TidTaZQCrFNaNfKQClKQBlMKxSQEoQEn4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAaTAyaTQeTQhTaZQBsFNaNfKQBmQBbQBbQBeQBhMKxSQDxQDw4spec__T10FormatSpecTaZQpEQFdQFcQEyQEs13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAaTQdTQgTQjTaZQBrFNaNfKQBlQBaQBdQBgQBjMKxSQDwQDv4spec__T10FormatSpecTaZQpEQFcQFbQExQEr13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAyaTAaTQdTQgTaZQBsFNaNfKQBmQBbQBaQBdQBgMKxSQDxQDw4spec__T10FormatSpecTaZQpEQFdQFcQEyQEs13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAyaTAaTQdTQkTaZQBsFNaNfKQBmQBbQBaQBdQBkMKxSQDxQDw4spec__T10FormatSpecTaZQpEQFdQFcQEyQEs13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAyaTQeTQhTQkTaZQBsFNaNfKQBmQBbQBeQBhQBkMKxSQDxQDw4spec__T10FormatSpecTaZQpEQFdQFcQEyQEs13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTDFNaNbNfAxaZvTAyaTQeTQhTaZQBpFNaNfKQBjQyQBaQBdMKxSQDqQDp4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAaTQdTQgTQjTaZQChFNaNfKQCbQBaQBdQBgQBjMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAaTQdTQnTQqTaZQChFNaNfKQCbQBaQBdQBnQBqMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAaTQdTQnTaZQCeFNaNfKQByQxQzQBiMKxSQEeQEd4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAaTaZQByFNaNfKQBsQrMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAwTaZQByFNaNfKQBsQrMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTAxaTaZQBzFNaNfKQBtQsMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTAaTaZQCeFNaNfKQByQBeQBhQxMKxSQEfQEe4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTAwTaZQCeFNaNfKQByQBeQBhQxMKxSQEfQEe4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTAxaTaZQCfFNaNfKQBzQBfQBiQyMKxSQEgQEf4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTQnTAaTaZQChFNaNfKQCbQBhQBkQBnQBaMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTQnTAwTaZQChFNaNfKQCbQBhQBkQBnQBaMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTQnTAxaTaZQCiFNaNfKQCcQBiQBlQBoQBbMKxSQEnQEm4spec__T10FormatSpecTaZQpEQFtQFsQFoQFi13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTQnTQqTaZQChFNaNfKQCbQBhQBkQBnQBqMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTQkTQnTaZQCeFNaNfKQByQBeQBhQBkMKxSQEgQEf4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTAyaZQoTQhTaZQByFNaNfKQBsQyMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAaTQdTAyaTQeTaZQCiFNaNfKQCcQBbQBeQBbQBeMKxSQEnQEm4spec__T10FormatSpecTaZQpEQFtQFsQFoQFi13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAaTQdTAyaTaZQCfFNaNfKQBzQyQBaQxMKxSQEfQEe4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAaTQdTQgTQjTaZQChFNaNfKQCbQBaQBdQBgQBjMKxSQEmQEl4spec__T10FormatSpecTaZQpEQFsQFrQFnQFh13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAaTaZQByFNaNfKQBsQrMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAwTaZQByFNaNfKQBsQrMKxSQDtQDs4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTAaTaZQCfFNaNfKQBzQyQBaQwMKxSQEfQEe4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTAwTaZQCfFNaNfKQBzQyQBaQwMKxSQEfQEe4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTQhTAaTaZQCiFNaNfKQCcQBbQBeQBhQBaMKxSQEnQEm4spec__T10FormatSpecTaZQpEQFtQFsQFoQFi13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTQhTAwTaZQCiFNaNfKQCcQBbQBeQBhQBaMKxSQEnQEm4spec__T10FormatSpecTaZQpEQFtQFsQFoQFi13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTQhTQkTaZQCiFNaNfKQCcQBbQBeQBhQBkMKxSQEnQEm4spec__T10FormatSpecTaZQpEQFtQFsQFoQFi13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTQeTQhTaZQCfFNaNfKQBzQyQBaQBdMKxSQEgQEf4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5array__T8AppenderTyAaZQoTAyaTaZQBzFNaNfKQBtQsMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAaTQdTAyaTQeTaZQCnFNfKQCfQzQBbQyQBaMKxSQEoQEn4spec__T10FormatSpecTaZQpEQFuQFtQFpQFj13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAaTQdTAyaTaZQCkFNfKQCcQwQyQuMKxSQEhQEg4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAaTQdTQgTQjTaZQCmFNfKQCeQyQBaQBdQBgMKxSQEoQEn4spec__T10FormatSpecTaZQpEQFuQFtQFpQFj13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAaTaZQCdFNfKQBvQpMKxSQDwQDv4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAwTaZQCdFNfKQBvQpMKxSQDwQDv4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAxaTaZQCeFNfKQBwQqMKxSQDxQDw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTAaTaZQCkFNfKQCcQwQyQtMKxSQEhQEg4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTAwTaZQCkFNfKQCcQwQyQtMKxSQEhQEg4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTAxaTaZQClFNfKQCdQxQzQuMKxSQEiQEh4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTQhTAaTaZQCnFNfKQCfQzQBbQBeQxMKxSQEoQEn4spec__T10FormatSpecTaZQpEQFuQFtQFpQFj13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTQhTAwTaZQCnFNfKQCfQzQBbQBeQxMKxSQEoQEn4spec__T10FormatSpecTaZQpEQFuQFtQFpQFj13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTQhTAxaTaZQCoFNfKQCgQBaQBdQBgQzMKxSQEqQEp4spec__T10FormatSpecTaZQpEQFwQFvQFrQFl13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTQhTQkTaZQCnFNfKQCfQzQBbQBeQBhMKxSQEpQEo4spec__T10FormatSpecTaZQpEQFvQFuQFqQFk13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTQeTQhTaZQCkFNfKQCcQwQyQBaMKxSQEiQEh4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBt5stdio4File17LockingTextWriterTAyaTaZQCeFNfKQBwQqMKxSQDxQDw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAaTAyaTQeTQhTaZQBvFNaNfKQBpQBbQBbQBeQBhMKxSQEaQDz4spec__T10FormatSpecTaZQpEQFgQFfQFbQEv13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAaTQdTAyaTQeTaZQBvFNaNfKQBpQBbQBeQBbQBeMKxSQEaQDz4spec__T10FormatSpecTaZQpEQFgQFfQFbQEv13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAaTQdTAyaTaZQBsFNaNfKQBmQyQBaQxMKxSQDsQDr4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAaTQdTQgTQjTaZQBuFNaNfKQBoQBaQBdQBgQBjMKxSQDzQDy4spec__T10FormatSpecTaZQpEQFfQFeQFaQEu13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAyaTAaTQdTQgTaZQBvFNaNfKQBpQBbQBaQBdQBgMKxSQEaQDz4spec__T10FormatSpecTaZQpEQFgQFfQFbQEv13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAyaTAaTQdTQkTaZQBvFNaNfKQBpQBbQBaQBdQBkMKxSQEaQDz4spec__T10FormatSpecTaZQpEQFgQFfQFbQEv13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAyaTQeTQhTQkTaZQBvFNaNfKQBpQBbQBeQBhQBkMKxSQEaQDz4spec__T10FormatSpecTaZQpEQFgQFfQFbQEv13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs8NoOpSinkTAyaTQeTQhTaZQBsFNaNfKQBmQyQBaQBdMKxSQDtQDs4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaTQeTQhTQkTaZQCxFNaNfKQCrQBbQBeQBhQBkMKxSQFcQFb4spec__T10FormatSpecTaZQpEQGiQGhQGdQFx13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaTQeTQhTaZQCuFNaNfKQCoQyQBaQBdMKxSQEvQEu4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaTQtTQwTQkTaZQCxFNaNfKQCrQBbQBtQBwQBkMKxSQFcQFb4spec__T10FormatSpecTaZQpEQGiQGhQGdQFx13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTAyaTQtTQwTQzTaZQCxFNaNfKQCrQBbQBtQBwQBzMKxSQFcQFb4spec__T10FormatSpecTaZQpEQGiQGhQGdQFx13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQpTAyaTQeTQhTaZQCxFNaNfKQCrQBqQBbQBeQBhMKxSQFcQFb4spec__T10FormatSpecTaZQpEQGiQGhQGdQFx13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTQpTQsTQvTQyTaZQCwFNaNfKQCqQBpQBsQBvQByMKxSQFbQFa4spec__T10FormatSpecTaZQpEQGhQGgQGcQFw13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQuTQxTAyaTQeTaZQDlFNaNfKQDfQBvQByQBbQBeMKxSQFqQFp4spec__T10FormatSpecTaZQpEQGwQGvQGrQGl13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQuTQxTAyaTaZQDiFNaNfKQDcQBsQBvQyMKxSQFjQFi4spec__T10FormatSpecTaZQpbZv@Base 12
+ _D3std6format8internal5write__T12writeAlignedTSQBtQBs__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTQuTQxTQBaTQBeTaZQDmFNaNfKQDgQBwQBzQCcQCfMKxSQFrQFq4spec__T10FormatSpecTaZQpEQGxQGwQGsQGm13PrecisionTypeZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTAyaZQoTQhTaZQBzFNaNfKQBsQyMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTAyaZQoTaTaZQByFNaNfKQBraMKxSQDsQDr4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTAyaZQoTwTaZQByFNaNfKQBrwMKxSQDsQDr4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTAyaZQoTxaTaZQBzFNaNfKQBsxaMKxSQDuQDt4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTAyaZQoTxhTaZQBzFNaNfKQBsKxhMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5array__T8AppenderTyAaZQoTwTaZQByFNaNfKQBrwMKxSQDsQDr4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T13formatElementTSQBu5stdio4File17LockingTextWriterTwTaZQCdFNfKQBuwMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTDFNaNbNfAxaZvTeTaZQBkFNaNfKQBbxeMKxSQDfQDe4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTAaTaZQCbFNaNfKQBsMxAaMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTAxaTaZQCcFNaNfKQBtMxAaMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTAxhTaZQCcFNaNfKQBtQsMKxSQDxQDw4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTAyAaTaZQCdFNaNfKQBuQtMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTC14TypeInfo_ClassTaZQCqFNaNfKQChQBgMKxSQEmQEl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTEQDa3net7isemail15EmailStatusCodeTaZQDgFNaNfKQCxxEQExQBxQBwQBrMKxSQFnQFm4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTEQDa8datetime4date5MonthTaZQCxFNaNfKQCoxEQEoQBoQBiQBgMKxSQFeQFd4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTPSQDb11parallelism12AbstractTaskTaZQDfFNaNfKQCwMxPSQEyQBxQBnMKxSQFlQFk4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTPvTaZQCbFNaNfKQBsMxPvMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTPxSQDcQDb4spec__T10FormatSpecTaZQpTaZQDhFNaNfKQCyMxPQByMKxQCeZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTPxhTaZQCcFNaNfKQBtMxPhMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTQhTaZQCbFNaNfKQBsMxAyaMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTSQDa11concurrency3TidTaZQCuFNaNfKQClKQBlMKxSQErQEq4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTSQDa4path__T16asNormalizedPathTSQEf5range__T5chainTSQEz3utf__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplTSQGzQCu__T10OnlyResultTaZQpTQDcZQDnFQDkQBnQDqZ6ResultZQFpFNkMQFdZQtTaZQInFNaNfKQIeKQHeMKxSQKkQKj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTSQDa5range__T5chainTSQDu3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQFtQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultTaZQGtFNaNfKQGkKQFkMKxSQIqQIp4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTaTaZQCaFNaNfKQBrxaMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTbTaZQCaFNaNfKQBrxbMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTgTaZQCaFNaNfKQBrxgMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoThTaZQCaFNaNfKQBrxhMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTiTaZQCaFNaNfKQBrxiMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTkTaZQCaFNaNfKQBrxkMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTmTaZQCaFNaNfKQBrxmMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTsTaZQCaFNaNfKQBrxsMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTtTaZQCaFNaNfKQBrxtMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTAyaZQoTwTaZQCaFNaNfKQBrxwMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTAyaTaZQCcFNaNfKQBtMxAyaMKxSQEaQDz4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTEQDa12experimental6logger4core8LogLevelTaZQDmFNaNfKQDdxEQFdQCdQBsQBoQBmMKxSQFwQFv4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTEQDa5regexQCy2ir2IRTaZQCsFNaNfKQCjxEQEjQBjQEeQBgQBgMKxSQFcQFb4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTEQDa6socket12SocketOptionTaZQCyFNaNfKQCpxEQEpQBpQBlMKxSQFcQFb4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTaTaZQCaFNaNfKQBrxaMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoThTaZQCaFNaNfKQBrxhMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTiTaZQCaFNaNfKQBrxiMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTkTaZQCaFNaNfKQBrxkMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5array__T8AppenderTyAaZQoTwTaZQCaFNaNfKQBrxwMKxSQDvQDu4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTAxaTaZQChFNfKQBwMxAaMKxSQEcQEb4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTAyaTaZQChFNfKQBwMxAyaMKxSQEdQEc4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTEQDf8datetime4date5MonthTaZQDcFNfKQCrxEQErQBmQBgQBeMKxSQFhQFg4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTaTaZQCfFNfKQBuxaMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterThTaZQCfFNfKQBuxhMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTiTaZQCfFNfKQBuxiMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTkTaZQCfFNfKQBuxkMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTlTaZQCfFNfKQBuxlMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTmTaZQCfFNfKQBuxmMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTsTaZQCfFNfKQBuxsMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBw5stdio4File17LockingTextWriterTwTaZQCfFNfKQBuxwMKxSQDyQDx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBwQBv8NoOpSinkTdTaZQBnFNaNfKQBexdMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBwQBv8NoOpSinkThTaZQBnFNaNfKQBexhMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBwQBv8NoOpSinkTmTaZQBnFNaNfKQBexmMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBwQBv__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCpFNaNfKQCgxdMKxSQEkQEj4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T15formatValueImplTSQBwQBv__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTkTaZQDdFNaNfKQCuxkMKxSQEyQEx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T19needToSwapEndianessTaZQyFNaNbNiNfMKxSQCqQCp4spec__T10FormatSpecTaZQpZb@Base 12
+ _D3std6format8internal5write__T20formatValueImplUlongTSQCb5array__T8AppenderTAyaZQoTaZQCdFNaNfKQBpmIbMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T20formatValueImplUlongTSQCb5array__T8AppenderTyAaZQoTaZQCdFNaNfKQBpmIbMKxSQDzQDy4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T20formatValueImplUlongTSQCb5stdio4File17LockingTextWriterTaZQCiFNfKQBsmIbMKxSQEcQEb4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T20formatValueImplUlongTSQCbQCa8NoOpSinkTaZQBqFNaNfKQBcmIbMKxSQDmQDl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T20formatValueImplUlongTSQCbQCa__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4SinkTaZQDgFNaNfKQCsmIbMKxSQFcQFb4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T22enforceValidFormatSpecTC14TypeInfo_ClassTaZQBtFNaNfMKxSQDiQDh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T22enforceValidFormatSpecTSQCd11concurrency3TidTaZQBxFNaNfMKxSQDmQDl4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T22enforceValidFormatSpecTSQCd4path__T16asNormalizedPathTSQDi5range__T5chainTSQEc3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQGbQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQtTaZQHpFNaNbNiNfMKxSQJiQJh4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T22enforceValidFormatSpecTSQCd5range__T5chainTSQCx3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQEwQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultTaZQFwFNaNbNiNfMKxSQHpQHo4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal5write__T5roundTG1077aZQoFNaNbNiNfKQtmmEQCiQChQCdQBx13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG1078aZQoFNaNbNiNfKQtmmEQCiQChQCdQBx13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG15aZQmFNaNbNiNfKQrmmEQCgQCfQCbQBv13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG16448aZQpFNaNbNiNfKQummEQCjQCiQCeQBy13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG16449aZQpFNaNbNiNfKQummEQCjQCiQCeQBy13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG18aZQmFNaNbNiNfKQrmmEQCgQCfQCbQBv13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T5roundTG64aZQmFNaNbNiNfKQrmmEQCgQCfQCbQBv13RoundingClassbaZb@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTAaTPvZQCvFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTAxaTQeZQCwFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTAxaZQCtFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTAxhZQCtFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTAyAaZQCuFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTC14TypeInfo_ClassTkTkZQDlFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTEQDt8datetime4date5MonthZQDoFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTPvZQCsFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTQClTQCpZQDbFNaNfkQDcQDfQDiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTQClTiTQCrZQDdFNaNfkQDeQDhiQDlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTQClTmZQCzFNaNfkQDaQDdmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTQClZQCxFNaNfkQCyQDbZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTkZQCvFNaNfkQCwkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTmTQCnTQCrTxlZQDgFNaNfkQDhmQDlQDoxlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTmTQCnTQCrTxmZQDgFNaNfkQDhmQDlQDoxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTmTQCnTxmZQDcFNaNfkQDdmQDhxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTxkTQCoZQDaFNaNfkQDbxkQDgZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTxkTkZQCyFNaNfkQCzxkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTxkZQCwFNaNfkQCxxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTxmTQCoTQCsTmZQDgFNaNfkQDhxmQDmQDpmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChTxmTQCoTxmZQDdFNaNfkQDexmQDjxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTQChZQCtFNaNfkQCuZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTbTQCjTQCnTEQEd3net7isemail15EmailStatusCodeZQEhFNaNfkbQEjQEmQByZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTiTEQDv8datetime4date5MonthTiZQDsFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTiTiZQCtFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTiZQCrFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTkTkTkZQCvFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTkTxkTxkTxkZQDaFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTkZQCrFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTmTQCjTmTQCpTmTQCvTQCzZQDlFNaNfkmQDnmQDrmQDvQDyZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTmTmTymZQCwFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTmTmZQCtFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTmZQCrFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTsTEQDv8datetime4date5MonthThThThThTxlZQEbFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTwTkTkZQCvFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTwTkZQCtFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTwZQCrFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxdZQCsFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxhTxhTxhTxhZQDbFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxhTxhTxhZQCyFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxkZQCsFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxmTxmZQCvFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxsTQCkTxhZQCzFNaNfkxsQDcxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxsTxEQDx8datetime4date5MonthTxhZQDvFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxsZQCsFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTxtTQCkTxtTxtZQDcFNaNfkxtQDfxtxtZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa13_696e7465676572207769647468SQCt6traits10isIntegralTiTykTykTkTkTkZQDbFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTAaTPvZQDdFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTAxaTQeZQDeFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTAxaZQDbFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTAxhZQDbFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTAyAaZQDcFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTC14TypeInfo_ClassTkTkZQDtFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTEQEb8datetime4date5MonthZQDwFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTPvZQDaFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTQCtTQCxZQDjFNaNfkQDkQDnQDqZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTQCtTiTQCzZQDlFNaNfkQDmQDpiQDtZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTQCtTmZQDhFNaNfkQDiQDlmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTQCtZQDfFNaNfkQDgQDjZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTkZQDdFNaNfkQDekZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTmTQCvTQCzTxlZQDoFNaNfkQDpmQDtQDwxlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTmTQCvTQCzTxmZQDoFNaNfkQDpmQDtQDwxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTmTQCvTxmZQDkFNaNfkQDlmQDpxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTxkTQCwZQDiFNaNfkQDjxkQDoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTxkTkZQDgFNaNfkQDhxkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTxkZQDeFNaNfkQDfxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTxmTQCwTQDaTmZQDoFNaNfkQDpxmQDuQDxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpTxmTQCwTxmZQDlFNaNfkQDmxmQDrxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTQCpZQDbFNaNfkQDcZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTbTQCrTQCvTEQEl3net7isemail15EmailStatusCodeZQEpFNaNfkbQErQEuQByZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTiTEQEd8datetime4date5MonthTiZQEaFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTiTiZQDbFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTiZQCzFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTkTkTkZQDdFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTkTxkTxkTxkZQDiFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTkZQCzFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTmTQCrTmTQCxTmTQDdTQDhZQDtFNaNfkmQDvmQDzmQEdQEgZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTmTmTymZQDeFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTmTmZQDbFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTmZQCzFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTsTEQEd8datetime4date5MonthThThThThTxlZQEjFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTwTkTkZQDdFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTwTkZQDbFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTwZQCzFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxdZQDaFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxhTxhTxhTxhZQDjFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxhTxhTxhZQDgFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxkZQDaFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxmTxmZQDdFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxsTQCsTxhZQDhFNaNfkxsQDkxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxsTxEQEf8datetime4date5MonthTxhZQEdFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxsZQDaFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTxtTQCsTxtTxtZQDkFNaNfkxtQDnxtxtZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa17_696e746567657220707265636973696f6eSQDb6traits10isIntegralTiTykTykTkTkTkZQDjFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTAaTPvZQDhFNaNfkQpQoZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTAxaTQeZQDiFNaNfkQqQsZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTAxaZQDfFNaNfkQnZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTAxhZQDfFNaNfkQnZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTAyAaZQDgFNaNfkQoZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTC14TypeInfo_ClassTkTkZQDxFNaNfkQBfkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTEQEf8datetime4date5MonthZQEaFNaNfkQBiZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTPvZQDeFNaNfkQmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTQCxTQDbZQDnFNaNfkQDoQDrQDuZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTQCxTiTQDdZQDpFNaNfkQDqQDtiQDxZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTQCxTmZQDlFNaNfkQDmQDpmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTQCxZQDjFNaNfkQDkQDnZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTkZQDhFNaNfkQDikZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTmTQCzTQDdTxlZQDsFNaNfkQDtmQDxQEaxlZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTmTQCzTQDdTxmZQDsFNaNfkQDtmQDxQEaxmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTmTQCzTxmZQDoFNaNfkQDpmQDtxmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTxkTQDaZQDmFNaNfkQDnxkQDsZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTxkTkZQDkFNaNfkQDlxkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTxkZQDiFNaNfkQDjxkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTxmTQDaTQDeTmZQDsFNaNfkQDtxmQDyQEbmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtTxmTQDaTxmZQDpFNaNfkQDqxmQDvxmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTQCtZQDfFNaNfkQDgZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTbTQCvTQCzTEQEp3net7isemail15EmailStatusCodeZQEtFNaNfkbQEvQEyQByZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTiTEQEh8datetime4date5MonthTiZQEeFNaNfkiQBliZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTiTiZQDfFNaNfkiiZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTiZQDdFNaNfkiZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTkTkTkZQDhFNaNfkkkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTkTxkTxkTxkZQDmFNaNfkkxkxkxkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTkZQDdFNaNfkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTmTQCvTmTQDbTmTQDhTQDlZQDxFNaNfkmQDzmQEdmQEhQEkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTmTmTymZQDiFNaNfkmmymZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTmTmZQDfFNaNfkmmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTmZQDdFNaNfkmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTsTEQEh8datetime4date5MonthThThThThTxlZQEnFNaNfksQBuhhhhxlZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTwTkTkZQDhFNaNfkwkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTwTkZQDfFNaNfkwkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTwZQDdFNaNfkwZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxdZQDeFNaNfkxdZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxhTxhTxhTxhZQDnFNaNfkxhxhxhxhZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxhTxhTxhZQDkFNaNfkxhxhxhZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxkZQDeFNaNfkxkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxmTxmZQDhFNaNfkxmxmZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxsTQCwTxhZQDlFNaNfkxsQDoxhZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxsTxEQEj8datetime4date5MonthTxhZQEhFNaNfkxsxQBoxhZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxsZQDeFNaNfkxsZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTxtTQCwTxtTxtZQDoFNaNfkxtQDrxtxtZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa19_736570617261746f7220636861726163746572SQDf6traits10isSomeCharTwTykTykTkTkTkZQDnFNaNfkykykkkkZw@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTAaTPvZQDlFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTAxaTQeZQDmFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTAxaZQDjFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTAxhZQDjFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTAyAaZQDkFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTC14TypeInfo_ClassTkTkZQEbFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTEQEj8datetime4date5MonthZQEeFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTPvZQDiFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTQDbTQDfZQDrFNaNfkQDsQDvQDyZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTQDbTiTQDhZQDtFNaNfkQDuQDxiQEbZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTQDbTmZQDpFNaNfkQDqQDtmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTQDbZQDnFNaNfkQDoQDrZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTkZQDlFNaNfkQDmkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTmTQDdTQDhTxlZQDwFNaNfkQDxmQEbQEexlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTmTQDdTQDhTxmZQDwFNaNfkQDxmQEbQEexmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTmTQDdTxmZQDsFNaNfkQDtmQDxxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTxkTQDeZQDqFNaNfkQDrxkQDwZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTxkTkZQDoFNaNfkQDpxkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTxkZQDmFNaNfkQDnxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTxmTQDeTQDiTmZQDwFNaNfkQDxxmQEcQEfmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxTxmTQDeTxmZQDtFNaNfkQDuxmQDzxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTQCxZQDjFNaNfkQDkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTbTQCzTQDdTEQEt3net7isemail15EmailStatusCodeZQExFNaNfkbQEzQFcQByZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTiTEQEl8datetime4date5MonthTiZQEiFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTiTiZQDjFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTiZQDhFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTkTkTkZQDlFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTkTxkTxkTxkZQDqFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTkZQDhFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTmTQCzTmTQDfTmTQDlTQDpZQEbFNaNfkmQEdmQEhmQElQEoZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTmTmTymZQDmFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTmTmZQDjFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTmZQDhFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTsTEQEl8datetime4date5MonthThThThThTxlZQErFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTwTkTkZQDlFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTwTkZQDjFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTwZQDhFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxdZQDiFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxhTxhTxhTxhZQDrFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxhTxhTxhZQDoFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxkZQDiFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxmTxmZQDlFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxsTQDaTxhZQDpFNaNfkxsQDsxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxsTxEQEn8datetime4date5MonthTxhZQElFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxsZQDiFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTxtTQDaTxtTxtZQDsFNaNfkxtQDvxtxtZi@Base 12
+ _D3std6format8internal5write__T6getNthVAyaa21_736570617261746f72206469676974207769647468SQDj6traits10isIntegralTiTykTykTkTkTkZQDrFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal5write__T8getWidthTAaZQnFNaNfQkZl@Base 12
+ _D3std6format8internal5write__T8getWidthTAwZQnFNaNbNiNfQoZl@Base 12
+ _D3std6format8internal5write__T8getWidthTAxaZQoFNaNfQlZl@Base 12
+ _D3std6format8internal5write__T8getWidthTAyaZQoFNaNfQlZl@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TAaTPvZQBzFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TAxaTQeZQCaFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TAxaZQBxFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TAxhZQBxFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TAyAaZQByFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TC14TypeInfo_ClassTkTkZQCpFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TEQCx8datetime4date5MonthZQCsFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TPvZQBwFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTQBmTQBqZQCfFNaNfkQCdQCgQCjZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTQBmTiTQBsZQChFNaNfkQCfQCiiQCmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTQBmTmZQCdFNaNfkQCbQCemZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTQBmZQCbFNaNfkQBzQCcZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTkZQBzFNaNfkQBxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTmTQBoTQBsTxlZQCkFNaNfkQCimQCmQCpxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTmTQBoTQBsTxmZQCkFNaNfkQCimQCmQCpxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTmTQBoTxmZQCgFNaNfkQCemQCixmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTxkTQBpZQCeFNaNfkQCcxkQChZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTxkTkZQCcFNaNfkQCaxkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTxkZQCaFNaNfkQByxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTxmTQBpTQBtTmZQCkFNaNfkQCixmQCnQCqmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiTxmTQBpTxmZQChFNaNfkQCfxmQCkxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TQBiZQBxFNaNfkQBvZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TbTQBkTQBoTEQDh3net7isemail15EmailStatusCodeZQDlFNaNfkbQDkQDnQByZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TiTEQCz8datetime4date5MonthTiZQCwFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TiTiZQBxFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TiZQBvFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TkTkTkZQBzFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TkTxkTxkTxkZQCeFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TkZQBvFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TmTQBkTmTQBqTmTQBwTQCaZQCpFNaNfkmQComQCsmQCwQCzZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TmTmTymZQCaFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TmTmZQBxFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TmZQBvFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TsTEQCz8datetime4date5MonthThThThThTxlZQDfFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TwTkTkZQBzFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TwTkZQBxFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TwZQBvFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxdZQBwFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxhTxhTxhTxhZQCfFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxhTxhTxhZQCcFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxkZQBwFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxmTxmZQBzFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxsTQBlTxhZQCdFNaNfkxsQCdxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxsTxEQDb8datetime4date5MonthTxhZQCzFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxsZQBwFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TxtTQBlTxtTxtZQCgFNaNfkxtQCgxtxtZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa13_696e7465676572207769647468TykTykTkTkTkZQCfFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTAaTPvZQChFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTAxaTQeZQCiFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTAxaZQCfFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTAxhZQCfFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTAyAaZQCgFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTC14TypeInfo_ClassTkTkZQCxFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTEQDf8datetime4date5MonthZQDaFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTPvZQCeFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTQBuTQByZQCnFNaNfkQClQCoQCrZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTQBuTiTQCaZQCpFNaNfkQCnQCqiQCuZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTQBuTmZQClFNaNfkQCjQCmmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTQBuZQCjFNaNfkQChQCkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTkZQChFNaNfkQCfkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTmTQBwTQCaTxlZQCsFNaNfkQCqmQCuQCxxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTmTQBwTQCaTxmZQCsFNaNfkQCqmQCuQCxxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTmTQBwTxmZQCoFNaNfkQCmmQCqxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTxkTQBxZQCmFNaNfkQCkxkQCpZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTxkTkZQCkFNaNfkQCixkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTxkZQCiFNaNfkQCgxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTxmTQBxTQCbTmZQCsFNaNfkQCqxmQCvQCymZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqTxmTQBxTxmZQCpFNaNfkQCnxmQCsxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTQBqZQCfFNaNfkQCdZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTbTQBsTQBwTEQDp3net7isemail15EmailStatusCodeZQDtFNaNfkbQDsQDvQByZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTiTEQDh8datetime4date5MonthTiZQDeFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTiTiZQCfFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTiZQCdFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTkTkTkZQChFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTkTxkTxkTxkZQCmFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTkZQCdFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTmTQBsTmTQByTmTQCeTQCiZQCxFNaNfkmQCwmQDamQDeQDhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTmTmTymZQCiFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTmTmZQCfFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTmZQCdFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTsTEQDh8datetime4date5MonthThThThThTxlZQDnFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTwTkTkZQChFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTwTkZQCfFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTwZQCdFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxdZQCeFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxhTxhTxhTxhZQCnFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxhTxhTxhZQCkFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxkZQCeFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxmTxmZQChFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxsTQBtTxhZQClFNaNfkxsQClxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxsTxEQDj8datetime4date5MonthTxhZQDhFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxsZQCeFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTxtTQBtTxtTxtZQCoFNaNfkxtQCoxtxtZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa17_696e746567657220707265636973696f6eTykTykTkTkTkZQCnFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TAaTPvZQCpFNaNfkQpQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TAxaTQeZQCqFNaNfkQqQsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TAxaZQCnFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TAxhZQCnFNaNfkQnZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TAyAaZQCoFNaNfkQoZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TC14TypeInfo_ClassTkTkZQDfFNaNfkQBfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TEQDn8datetime4date5MonthZQDiFNaNfkQBiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TPvZQCmFNaNfkQmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTQCcTQCgZQCvFNaNfkQCtQCwQCzZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTQCcTiTQCiZQCxFNaNfkQCvQCyiQDcZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTQCcTmZQCtFNaNfkQCrQCumZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTQCcZQCrFNaNfkQCpQCsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTkZQCpFNaNfkQCnkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTmTQCeTQCiTxlZQDaFNaNfkQCymQDcQDfxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTmTQCeTQCiTxmZQDaFNaNfkQCymQDcQDfxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTmTQCeTxmZQCwFNaNfkQCumQCyxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTxkTQCfZQCuFNaNfkQCsxkQCxZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTxkTkZQCsFNaNfkQCqxkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTxkZQCqFNaNfkQCoxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTxmTQCfTQCjTmZQDaFNaNfkQCyxmQDdQDgmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByTxmTQCfTxmZQCxFNaNfkQCvxmQDaxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TQByZQCnFNaNfkQClZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TbTQCaTQCeTEQDx3net7isemail15EmailStatusCodeZQEbFNaNfkbQEaQEdQByZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TiTEQDp8datetime4date5MonthTiZQDmFNaNfkiQBliZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TiTiZQCnFNaNfkiiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TiZQClFNaNfkiZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TkTkTkZQCpFNaNfkkkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TkTxkTxkTxkZQCuFNaNfkkxkxkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TkZQClFNaNfkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TmTQCaTmTQCgTmTQCmTQCqZQDfFNaNfkmQDemQDimQDmQDpZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TmTmTymZQCqFNaNfkmmymZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TmTmZQCnFNaNfkmmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TmZQClFNaNfkmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TsTEQDp8datetime4date5MonthThThThThTxlZQDvFNaNfksQBuhhhhxlZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TwTkTkZQCpFNaNfkwkkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TwTkZQCnFNaNfkwkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TwZQClFNaNfkwZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxdZQCmFNaNfkxdZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxhTxhTxhTxhZQCvFNaNfkxhxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxhTxhTxhZQCsFNaNfkxhxhxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxkZQCmFNaNfkxkZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxmTxmZQCpFNaNfkxmxmZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxsTQCbTxhZQCtFNaNfkxsQCtxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxsTxEQDr8datetime4date5MonthTxhZQDpFNaNfkxsxQBoxhZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxsZQCmFNaNfkxsZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TxtTQCbTxtTxtZQCwFNaNfkxtQCwxtxtZi@Base 12
+ _D3std6format8internal5write__T9getNthIntVAyaa21_736570617261746f72206469676974207769647468TykTykTkTkTkZQCvFNaNfkykykkkkZi@Base 12
+ _D3std6format8internal6floats11__moduleRefZ@Base 12
+ _D3std6format8internal6floats12__ModuleInfoZ@Base 12
+ _D3std6format8internal6floats__T10printFloatTDFNaNbNfAxaZvTeTaZQBfFNaNfKQBbxeSQCyQCx4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal6floats__T10printFloatTSQBsQBr8NoOpSinkTdTaZQBiFNaNfKQBexdSQDbQDa4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal6floats__T10printFloatTSQBsQBr__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCkFNaNfKQCgxdSQEdQEc4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std6format8internal6floats__T11printFloatATDFNaNbNfAxaZvTeTaZQBgFNaNfKQBbxeSQCzQCy4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatATSQBtQBs8NoOpSinkTdTaZQBjFNaNfKQBexdSQDcQDb4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatATSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQClFNaNfKQCgxdSQEeQEd4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi0TDFNaNbNfAxaZvTeTaZQBkFNaNfKQBbxeSQDdQDc4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi0TSQBxQBw8NoOpSinkTdTaZQBnFNaNfKQBexdSQDgQDf4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi0TSQBxQBw__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCpFNaNfKQCgxdSQEiQEh4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi1TDFNaNbNfAxaZvTeTaZQBkFNaNfKQBbxeSQDdQDc4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi1TSQBxQBw8NoOpSinkTdTaZQBnFNaNfKQBexdSQDgQDf4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatEVbi1TSQBxQBw__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCpFNaNfKQCgxdSQEiQEh4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi0TDFNaNbNfAxaZvTeTaZQBkFNaNfKQBbxeSQDdQDc4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi0TSQBxQBw8NoOpSinkTdTaZQBnFNaNfKQBexdSQDgQDf4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi0TSQBxQBw__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCpFNaNfKQCgxdSQEiQEh4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi1TDFNaNbNfAxaZvTeTaZQBkFNaNfKQBbxeSQDdQDc4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi1TSQBxQBw8NoOpSinkTdTaZQBnFNaNfKQBexdSQDgQDf4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatFVbi1TSQBxQBw__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQCpFNaNfKQCgxdSQEiQEh4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatGTDFNaNbNfAxaZvTeTaZQBgFNaNfKQBbxeSQCzQCy4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatGTSQBtQBs8NoOpSinkTdTaZQBjFNaNfKQBexdSQDcQDb4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format8internal6floats__T11printFloatGTSQBtQBs__T7sformatTaTxdZQoFNkMAaMAxaxdZ4SinkTdTaZQClFNaNfKQCgxdSQEeQEd4spec__T10FormatSpecTaZQpAyaimbZv@Base 12
+ _D3std6format__T11guessLengthTaTAyaZQuFNaNfQlZm@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNaNfNkMAaMAxaxdZQj@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink11__xopEqualsMxFKxSQCpQCo__TQCkTaTxdZQCtFNkMQCgMQChxdZQChZb@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink3putMFNaNbNfMQyZv@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink3putMFNaNfMAxuZv@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink3putMFNaNfMAxwZv@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink3putMFNaNfwZv@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink6__initZ@Base 12
+ _D3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink9__xtoHashFNbNeKxSQCoQCn__TQCjTaTxdZQCsFNkMQCfMQCgxdZQCgZm@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNaNfNkMAaMAxaykykkkkZQo@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink11__xopEqualsMxFKxSQDdQDc__TQCyTaTykTykTkTkTkZQDqFNkMQCuMQCvykykkkkZQCvZb@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink3putMFNaNbNfMQBdZv@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink3putMFNaNfMAxuZv@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink3putMFNaNfMAxwZv@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink3putMFNaNfwZv@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink6__initZ@Base 12
+ _D3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink9__xtoHashFNbNeKxSQDcQDb__TQCxTaTykTykTkTkTkZQDpFNkMQCtMQCuykykkkkZQCuZm@Base 12
+ _D3std6format__T7sformatVAyaa5_252e313867TxdZQBdFNaNfAaxdZQf@Base 12
+ _D3std6format__TQkTaTAaTPvZQvFNaNfIAaQqQpZAya@Base 12
+ _D3std6format__TQkTaTAxaTQeZQwFNaNfIAaQrQtZAya@Base 12
+ _D3std6format__TQkTaTAyAaZQuFNaNfIAaQpZAya@Base 12
+ _D3std6format__TQkTaTAyaTQeTmZQyFNaNfIAaQtQvmZQz@Base 12
+ _D3std6format__TQkTaTAyaTQeZQwFNaNfIAaQrQtZQw@Base 12
+ _D3std6format__TQkTaTAyaTxkTQhZQzFNaNfIAaQuxkQyZQBb@Base 12
+ _D3std6format__TQkTaTAyaTxkTkZQyFNaNfIAaQtxkkZQz@Base 12
+ _D3std6format__TQkTaTAyaTxkZQwFNaNfIAaQrxkZQw@Base 12
+ _D3std6format__TQkTaTAyaZQtFNaNfIAaQoZQr@Base 12
+ _D3std6format__TQkTaTC14TypeInfo_ClassTkTkZQBlFNaNfIAaQBhkkZAya@Base 12
+ _D3std6format__TQkTaTEQu8datetime4date5MonthZQBnFNaNfIAaQBjZAya@Base 12
+ _D3std6format__TQkTaTbTAyaTQeTEQBd3net7isemail15EmailStatusCodeZQCgFNaNfIAabQCbQCeQCaZQCl@Base 12
+ _D3std6format__TQkTaTiTEQw8datetime4date5MonthTiZQBrFNaNfIAaiQBmiZAya@Base 12
+ _D3std6format__TQkTaTiZQrFNaNfIAaiZAya@Base 12
+ _D3std6format__TQkTaTwZQrFNaNfIAawZAya@Base 12
+ _D3std6format__TQkTaTxhTxhTxhTxhZQBbFNaNfIAaxhxhxhxhZAya@Base 12
+ _D3std6format__TQkTaTxhTxhTxhZQyFNaNfIAaxhxhxhZAya@Base 12
+ _D3std6format__TQkTaTxmTxmZQvFNaNfIAaxmxmZAya@Base 12
+ _D3std6format__TQkTaTxsTxEQy8datetime4date5MonthTxhZQBuFNaNfIAaxsxQBpxhZAya@Base 12
+ _D3std6format__TQkTaTxsZQsFNaNfIAaxsZAya@Base 12
+ _D3std6format__TQkTaTxtTAyaTxtTxtZQBcFNaNfIAaxtQxxtxtZQBe@Base 12
+ _D3std6format__TQkVAyaa35_737461636b2e6c656e677468202d206174202573206d7573742062652032206f722033TmZQDpFNaNfmZQDm@Base 12
+ _D3std6format__TQkVAyaa39_7372632e6c656e677468202573206d75737420657175616c20646573742e6c656e677468202573TmTmZQDzFNaNfmmZQDx@Base 12
+ _D3std6format__TQkVAyaa42_74656d702e6c656e677468202573203e3d2072616e67652e6c656e677468202573202d206d6964202573TmTmTymZQEiFNaNfmmymZQEi@Base 12
+ _D3std6getopt10assignCharw@Base 12
+ _D3std6getopt10optionCharw@Base 12
+ _D3std6getopt11__moduleRefZ@Base 12
+ _D3std6getopt11splitAndGetFNaNbNeAyaZSQBkQBj6Option@Base 12
+ _D3std6getopt12GetoptResult11__xopEqualsMxFKxSQBsQBrQBnZb@Base 12
+ _D3std6getopt12GetoptResult6__initZ@Base 12
+ _D3std6getopt12GetoptResult9__xtoHashFNbNeKxSQBrQBqQBmZm@Base 12
+ _D3std6getopt12__ModuleInfoZ@Base 12
+ _D3std6getopt12endOfOptionsAya@Base 12
+ _D3std6getopt13configuration11passThroughMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration11passThroughMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt13configuration13caseSensitiveMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration13caseSensitiveMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt13configuration16keepEndOfOptionsMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration16keepEndOfOptionsMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt13configuration20stopOnFirstNonOptionMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration20stopOnFirstNonOptionMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt13configuration6__initZ@Base 12
+ _D3std6getopt13configuration8bundlingMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration8bundlingMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt13configuration8requiredMFNaNbNdNiNfbZv@Base 12
+ _D3std6getopt13configuration8requiredMxFNaNbNdNiNfZb@Base 12
+ _D3std6getopt15GetOptException6__initZ@Base 12
+ _D3std6getopt15GetOptException6__vtblZ@Base 12
+ _D3std6getopt15GetOptException7__ClassZ@Base 12
+ _D3std6getopt15GetOptException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDcQDbQCx@Base 12
+ _D3std6getopt15GetOptException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDcQDbQCx@Base 12
+ _D3std6getopt20defaultGetoptPrinterFNfAyaASQBpQBo6OptionZ9__lambda3FNeZSQCs5stdio4File17LockingTextWriter@Base 12
+ _D3std6getopt20defaultGetoptPrinterFNfAyaASQBpQBo6OptionZv@Base 12
+ _D3std6getopt6Option11__xopEqualsMxFKxSQBlQBkQBgZb@Base 12
+ _D3std6getopt6Option6__initZ@Base 12
+ _D3std6getopt6Option9__xtoHashFNbNeKxSQBkQBjQBfZm@Base 12
+ _D3std6getopt8arraySepAya@Base 12
+ _D3std6getopt8optMatchFNfAyaMQeKQhSQBhQBg13configurationZb@Base 12
+ _D3std6getopt9setConfigFNaNbNiNfKSQBgQBf13configurationEQCcQCb6configZv@Base 12
+ _D3std6getopt__T22defaultGetoptFormatterTSQBo5stdio4File17LockingTextWriterZQCiFNfQBpAyaASQDkQDj6OptionQsZv@Base 12
+ _D3std6int12811__moduleRefZ@Base 12
+ _D3std6int12812__ModuleInfoZ@Base 12
+ _D3std6int1286Int12811__xopEqualsMxFKxSQBlQBkQBgZb@Base 12
+ _D3std6int1286Int1285opCmpMxFNaNbNiNfSQBkQBjQBfZi@Base 12
+ _D3std6int1286Int1285opCmpMxFNaNbNiNflZi@Base 12
+ _D3std6int1286Int1286__ctorMFNaNbNcNiNfS4coreQBn4CentZSQCbQCaQBw@Base 12
+ _D3std6int1286Int1286__ctorMFNaNbNcNiNflZSQBoQBnQBj@Base 12
+ _D3std6int1286Int1286__ctorMFNaNbNcNiNfllZSQBpQBoQBk@Base 12
+ _D3std6int1286Int1286__ctorMFNaNbNcNiNfmZSQBoQBnQBj@Base 12
+ _D3std6int1286Int1286__initZ@Base 12
+ _D3std6int1286Int1286toHashMxFNaNbNiNfZm@Base 12
+ _D3std6int1286Int1288__xopCmpMxFKxSQBhQBgQBcZi@Base 12
+ _D3std6int1286Int1288opEqualsMxFNaNbNiNfSQBnQBmQBiZb@Base 12
+ _D3std6int1286Int1288opEqualsMxFNaNbNiNflZb@Base 12
+ _D3std6int1286Int1288opEqualsMxFNaNbNiNfmZb@Base 12
+ _D3std6mmfile11__moduleRefZ@Base 12
+ _D3std6mmfile12__ModuleInfoZ@Base 12
+ _D3std6mmfile6MmFile10__aggrDtorMFZv@Base 12
+ _D3std6mmfile6MmFile11__fieldDtorMFNeZv@Base 12
+ _D3std6mmfile6MmFile12ensureMappedMFmZv@Base 12
+ _D3std6mmfile6MmFile12ensureMappedMFmmZv@Base 12
+ _D3std6mmfile6MmFile13opIndexAssignMFhmZh@Base 12
+ _D3std6mmfile6MmFile3mapMFmmZv@Base 12
+ _D3std6mmfile6MmFile4modeMFZEQBbQBaQw4Mode@Base 12
+ _D3std6mmfile6MmFile5flushMFZv@Base 12
+ _D3std6mmfile6MmFile5unmapMFZv@Base 12
+ _D3std6mmfile6MmFile6__ctorMFAyaEQBfQBeQBa4ModemPvmZCQBzQByQBu@Base 12
+ _D3std6mmfile6MmFile6__ctorMFAyaZCQBgQBfQBb@Base 12
+ _D3std6mmfile6MmFile6__ctorMFSQBc5stdio4FileEQBrQBqQBm4ModemPvmZCQClQCkQCg@Base 12
+ _D3std6mmfile6MmFile6__ctorMFiEQBdQBcQy4ModemPvmZCQBwQBvQBr@Base 12
+ _D3std6mmfile6MmFile6__dtorMFZv@Base 12
+ _D3std6mmfile6MmFile6__initZ@Base 12
+ _D3std6mmfile6MmFile6__vtblZ@Base 12
+ _D3std6mmfile6MmFile6lengthMxFNdZm@Base 12
+ _D3std6mmfile6MmFile6mappedMFmZi@Base 12
+ _D3std6mmfile6MmFile7__ClassZ@Base 12
+ _D3std6mmfile6MmFile7opIndexMFmZh@Base 12
+ _D3std6mmfile6MmFile7opSliceMFZAv@Base 12
+ _D3std6mmfile6MmFile7opSliceMFmmZAv@Base 12
+ _D3std6random11__moduleRefZ@Base 12
+ _D3std6random12__ModuleInfoZ@Base 12
+ _D3std6random12fallbackSeedFNbNiZ11initializedOb@Base 12
+ _D3std6random12fallbackSeedFNbNiZ4seedOm@Base 12
+ _D3std6random12fallbackSeedFNbNiZ6fmix64FNaNbNiNfmZm@Base 12
+ _D3std6random12fallbackSeedFNbNiZm@Base 12
+ _D3std6random13bootstrapSeedFNbNiZm@Base 12
+ _D3std6random17unpredictableSeedFNbNdNiNeZk@Base 12
+ _D3std6random18RandomCoverChoices10__postblitMFNaNbNiNeZv@Base 12
+ _D3std6random18RandomCoverChoices13opIndexAssignMFNaNbNiNebmZv@Base 12
+ _D3std6random18RandomCoverChoices6__ctorMFNaNbNcNiNemZSQCbQCaQBw@Base 12
+ _D3std6random18RandomCoverChoices6__dtorMFNaNbNiNeZv@Base 12
+ _D3std6random18RandomCoverChoices6__initZ@Base 12
+ _D3std6random18RandomCoverChoices6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std6random18RandomCoverChoices7opIndexMxFNaNbNiNemZb@Base 12
+ _D3std6random6rndGenFNbNcNdNiNfZ11initializedb@Base 12
+ _D3std6random6rndGenFNbNcNdNiNfZ6resultSQBmQBl__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc@Base 12
+ _D3std6random6rndGenFNbNcNdNiNfZSQBfQBe__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc@Base 12
+ _D3std6random__T12initMTEngineTSQBeQBd__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFcZQGfFNbNiNfKQGbZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki128Vii11ViN8ViN19ZQBn4saveMxFNaNbNdNiNfZSQCyQCx__TQCtTkVki128Vii11ViN8ViN19ZQDt@Base 12
+ _D3std6random__T14XorshiftEngineTkVki128Vii11ViN8ViN19ZQBn5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki128Vii11ViN8ViN19ZQBn6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki128Vii11ViN8ViN19ZQBn8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki160Vii2ViN1ViN4ZQBl4saveMxFNaNbNdNiNfZSQCwQCv__TQCrTkVki160Vii2ViN1ViN4ZQDp@Base 12
+ _D3std6random__T14XorshiftEngineTkVki160Vii2ViN1ViN4ZQBl5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki160Vii2ViN1ViN4ZQBl6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki160Vii2ViN1ViN4ZQBl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki192ViN2Vii1Vii4ZQBl4saveMxFNaNbNdNiNfZSQCwQCv__TQCrTkVki192ViN2Vii1Vii4ZQDp@Base 12
+ _D3std6random__T14XorshiftEngineTkVki192ViN2Vii1Vii4ZQBl5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki192ViN2Vii1Vii4ZQBl6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki192ViN2Vii1Vii4ZQBl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki32Vii13ViN17Vii15ZQBn4saveMxFNaNbNdNiNfZSQCyQCx__TQCtTkVki32Vii13ViN17Vii15ZQDt@Base 12
+ _D3std6random__T14XorshiftEngineTkVki32Vii13ViN17Vii15ZQBn5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki32Vii13ViN17Vii15ZQBn6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki32Vii13ViN17Vii15ZQBn8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki64Vii10ViN13ViN10ZQBn4saveMxFNaNbNdNiNfZSQCyQCx__TQCtTkVki64Vii10ViN13ViN10ZQDt@Base 12
+ _D3std6random__T14XorshiftEngineTkVki64Vii10ViN13ViN10ZQBn5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki64Vii10ViN13ViN10ZQBn6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki64Vii10ViN13ViN10ZQBn8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T14XorshiftEngineTkVki96Vii10ViN5ViN26ZQBm4saveMxFNaNbNdNiNfZSQCxQCw__TQCsTkVki96Vii10ViN5ViN26ZQDr@Base 12
+ _D3std6random__T14XorshiftEngineTkVki96Vii10ViN5ViN26ZQBm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T14XorshiftEngineTkVki96Vii10ViN5ViN26ZQBm6__initZ@Base 12
+ _D3std6random__T14XorshiftEngineTkVki96Vii10ViN5ViN26ZQBm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T17unpredictableSeedTmZQwFNbNdNiNeZm@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc12defaultStateFNaNbNiNfZSQGsQGr__TQGnTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQKv5State@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc12popFrontImplFNaNbNiNfKSQGsQGr__TQGnTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQKv5StateZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc4saveMxFNaNbNdNiNfZSQGnQGm__TQGiTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQKq@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc5State6__initZ@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc6__ctorMFNaNbNcNiNfkZSQGpQGo__TQGkTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQKs@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc6__initZ@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc8seedImplFNaNbNiNfkKSQGoQGn__TQGjTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQKr5StateZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTkVmi32Vmi624Vmi397Vmi31Vki2567483615Vmi11Vki4294967295Vmi7Vki2636928640Vmi15Vki4022730752Vmi18Vki1812433253ZQFc__T4seedZQgMFNaNbNiNfkZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt12defaultStateFNaNbNiNfZSQIjQIi__TQIeTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQOd5State@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt12popFrontImplFNaNbNiNfKSQIjQIi__TQIeTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQOd5StateZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt4saveMxFNaNbNdNiNfZSQIeQId__TQHzTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQNy@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt5State6__initZ@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt5frontMxFNaNbNdNiNfZm@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt6__ctorMFNaNbNcNiNfmZSQIgQIf__TQIbTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQOa@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt6__initZ@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt8seedImplFNaNbNiNfmKSQIfQIe__TQIaTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQNz5StateZv@Base 12
+ _D3std6random__T21MersenneTwisterEngineTmVmi64Vmi312Vmi156Vmi31VmN5403634167711393303Vmi29Vmi6148914691236517205Vmi17Vmi8202884508482404352Vmi37VmN2270628950310912Vmi43Vmi6364136223846793005ZQGt__T4seedZQgMFNaNbNiNfmZv@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc16primeFactorsOnlyFNaNbNiNfmZm@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc34properLinearCongruentialParametersFNaNbNiNfmmmZb@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc3gcdFNaNbNiNfmmZm@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc4saveMxFNaNbNdNiNfZSQDnQDm__TQDiTkVki16807Vki0Vki2147483647ZQEn@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc4seedMFNaNbNiNfkZv@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc6__ctorMFNaNbNcNiNfkZSQDpQDo__TQDkTkVki16807Vki0Vki2147483647ZQEp@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc6__initZ@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc16primeFactorsOnlyFNaNbNiNfmZm@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc34properLinearCongruentialParametersFNaNbNiNfmmmZb@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc3gcdFNaNbNiNfmmZm@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc4saveMxFNaNbNdNiNfZSQDnQDm__TQDiTkVki48271Vki0Vki2147483647ZQEn@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc4seedMFNaNbNiNfkZv@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc6__ctorMFNaNbNcNiNfkZSQDpQDo__TQDkTkVki48271Vki0Vki2147483647ZQEp@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc6__initZ@Base 12
+ _D3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6socket10SocketType6__initZ@Base 12
+ _D3std6socket10getAddressFNfMAxaMQeZACQBkQBj7Address@Base 12
+ _D3std6socket10getAddressFNfMAxatZACQBiQBh7Address@Base 12
+ _D3std6socket10socketPairFNeZG2CQBeQBd6Socket@Base 12
+ _D3std6socket11AddressInfo11__xopEqualsMxFKxSQBrQBqQBmZb@Base 12
+ _D3std6socket11AddressInfo6__initZ@Base 12
+ _D3std6socket11AddressInfo9__xtoHashFNbNeKxSQBqQBpQBlZm@Base 12
+ _D3std6socket11UnixAddress10setNameLenMFNekZv@Base 12
+ _D3std6socket11UnixAddress4nameMFNaNbNdNiNjNfZPS4core3sys5posixQkQCh8sockaddr@Base 12
+ _D3std6socket11UnixAddress4nameMxFNaNbNdNiNjNfZPxS4core3sys5posixQkQCj8sockaddr@Base 12
+ _D3std6socket11UnixAddress4pathMxFNaNdNeZAya@Base 12
+ _D3std6socket11UnixAddress6__ctorMFNaNbNiNfS4core3sys5posixQk2un11sockaddr_unZCQCzQCyQCu@Base 12
+ _D3std6socket11UnixAddress6__ctorMFNaNbNiNfZCQBrQBqQBm@Base 12
+ _D3std6socket11UnixAddress6__ctorMFNaNeMAxaZCQBrQBqQBm@Base 12
+ _D3std6socket11UnixAddress6__initZ@Base 12
+ _D3std6socket11UnixAddress6__vtblZ@Base 12
+ _D3std6socket11UnixAddress7__ClassZ@Base 12
+ _D3std6socket11UnixAddress7nameLenMxFNaNbNdNiNeZk@Base 12
+ _D3std6socket11UnixAddress8toStringMxFNaNfZAya@Base 12
+ _D3std6socket11__moduleRefZ@Base 12
+ _D3std6socket12InternetHost12validHostentMFNfIPS4core3sys5posix5netdb7hostentZv@Base 12
+ _D3std6socket12InternetHost13getHostByAddrMFNeMAxaZb@Base 12
+ _D3std6socket12InternetHost13getHostByAddrMFNekZb@Base 12
+ _D3std6socket12InternetHost13getHostByNameMFNeMAxaZb@Base 12
+ _D3std6socket12InternetHost6__initZ@Base 12
+ _D3std6socket12InternetHost6__vtblZ@Base 12
+ _D3std6socket12InternetHost7__ClassZ@Base 12
+ _D3std6socket12InternetHost8populateMFNaNbPS4core3sys5posix5netdb7hostentZv@Base 12
+ _D3std6socket12InternetHost__T13getHostNoSyncVAyaa118_0a2020202020202020202020206175746f2078203d2068746f6e6c28706172616d293b0a2020202020202020202020206175746f206865203d20676574686f73746279616464722826782c20342c206361737428696e7429204164647265737346616d696c792e494e4554293b0a2020202020202020TkZQKdMFkZb@Base 12
+ _D3std6socket12InternetHost__T13getHostNoSyncVAyaa245_0a2020202020202020202020206175746f2078203d20696e65745f6164647228706172616d2e74656d7043537472696e672829293b0a202020202020202020202020656e666f726365287820213d20494e414444525f4e4f4e452c0a202020202020202020202020202020206e657720536f636b6574506172616d65746572457863657074696f6e2822496e76616c6964204950763420616464726573732229293b0a2020202020202020202020206175746f206865203d20676574686f73746279616464722826782c20342c206361737428696e7429204164647265737346616d696c792e494e4554293b0a2020202020202020TAxaZQTzMFQjZb@Base 12
+ _D3std6socket12InternetHost__T13getHostNoSyncVAyaa75_0a202020202020202020202020202020206175746f206865203d20676574686f737462796e616d6528706172616d2e74656d7043537472696e672829293b0a202020202020202020202020TAxaZQGwMFQjZb@Base 12
+ _D3std6socket12InternetHost__T7getHostVAyaa118_0a2020202020202020202020206175746f2078203d2068746f6e6c28706172616d293b0a2020202020202020202020206175746f206865203d20676574686f73746279616464722826782c20342c206361737428696e7429204164647265737346616d696c792e494e4554293b0a2020202020202020TkZQJwMFkZb@Base 12
+ _D3std6socket12InternetHost__T7getHostVAyaa245_0a2020202020202020202020206175746f2078203d20696e65745f6164647228706172616d2e74656d7043537472696e672829293b0a202020202020202020202020656e666f726365287820213d20494e414444525f4e4f4e452c0a202020202020202020202020202020206e657720536f636b6574506172616d65746572457863657074696f6e2822496e76616c6964204950763420616464726573732229293b0a2020202020202020202020206175746f206865203d20676574686f73746279616464722826782c20342c206361737428696e7429204164647265737346616d696c792e494e4554293b0a2020202020202020TAxaZQTsMFQjZb@Base 12
+ _D3std6socket12InternetHost__T7getHostVAyaa75_0a202020202020202020202020202020206175746f206865203d20676574686f737462796e616d6528706172616d2e74656d7043537472696e672829293b0a202020202020202020202020TAxaZQGpMFQjZb@Base 12
+ _D3std6socket12SocketOption6__initZ@Base 12
+ _D3std6socket12__ModuleInfoZ@Base 12
+ _D3std6socket12parseAddressFNfMAxaMQeZCQBlQBk7Address@Base 12
+ _D3std6socket12parseAddressFNfMAxatZCQBjQBi7Address@Base 12
+ _D3std6socket13HostException6__initZ@Base 12
+ _D3std6socket13HostException6__vtblZ@Base 12
+ _D3std6socket13HostException7__ClassZ@Base 12
+ _D3std6socket13HostException8__mixin16__ctorMFNfAyaC6object9ThrowableQvmiZCQCvQCuQCq@Base 12
+ _D3std6socket13HostException8__mixin16__ctorMFNfAyaQdmC6object9ThrowableiZCQCvQCuQCq@Base 12
+ _D3std6socket13HostException8__mixin16__ctorMFNfAyaiQemC6object9ThrowableZCQCvQCuQCq@Base 12
+ _D3std6socket13_SOCKET_ERRORxi@Base 12
+ _D3std6socket13serviceToPortFNfMAxaZt@Base 12
+ _D3std6socket14UnknownAddress4nameMFNaNbNdNiNjNfZPS4core3sys5posixQkQCk8sockaddr@Base 12
+ _D3std6socket14UnknownAddress4nameMxFNaNbNdNiNjNfZPxS4core3sys5posixQkQCm8sockaddr@Base 12
+ _D3std6socket14UnknownAddress6__initZ@Base 12
+ _D3std6socket14UnknownAddress6__vtblZ@Base 12
+ _D3std6socket14UnknownAddress7__ClassZ@Base 12
+ _D3std6socket14UnknownAddress7nameLenMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket14formatGaiErrorFNeiZ12__critsec136OPv@Base 12
+ _D3std6socket14formatGaiErrorFNeiZAya@Base 12
+ _D3std6socket15InternetAddress12addrToStringFNbNekZAya@Base 12
+ _D3std6socket15InternetAddress12toAddrStringMxFNeZAya@Base 12
+ _D3std6socket15InternetAddress12toPortStringMxFNfZAya@Base 12
+ _D3std6socket15InternetAddress16toHostNameStringMxFNfZAya@Base 12
+ _D3std6socket15InternetAddress4addrMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket15InternetAddress4nameMFNaNbNdNiNjNfZPS4core3sys5posixQkQCl8sockaddr@Base 12
+ _D3std6socket15InternetAddress4nameMxFNaNbNdNiNjNfZPxS4core3sys5posixQkQCn8sockaddr@Base 12
+ _D3std6socket15InternetAddress4portMxFNaNbNdNiNfZt@Base 12
+ _D3std6socket15InternetAddress5parseFNbNeMAxaZk@Base 12
+ _D3std6socket15InternetAddress6__ctorMFNaNbNiNfS4core3sys5posix7netinet3in_11sockaddr_inZCQDkQDjQDf@Base 12
+ _D3std6socket15InternetAddress6__ctorMFNaNbNiNfZCQBvQBuQBq@Base 12
+ _D3std6socket15InternetAddress6__ctorMFNaNbNiNfktZCQBxQBwQBs@Base 12
+ _D3std6socket15InternetAddress6__ctorMFNaNbNiNftZCQBwQBvQBr@Base 12
+ _D3std6socket15InternetAddress6__ctorMFNfMAxatZCQBuQBtQBp@Base 12
+ _D3std6socket15InternetAddress6__initZ@Base 12
+ _D3std6socket15InternetAddress6__vtblZ@Base 12
+ _D3std6socket15InternetAddress7__ClassZ@Base 12
+ _D3std6socket15InternetAddress7nameLenMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket15InternetAddress8opEqualsMxFNfC6ObjectZb@Base 12
+ _D3std6socket15SocketException6__initZ@Base 12
+ _D3std6socket15SocketException6__vtblZ@Base 12
+ _D3std6socket15SocketException7__ClassZ@Base 12
+ _D3std6socket15SocketException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDcQDbQCx@Base 12
+ _D3std6socket15SocketException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDcQDbQCx@Base 12
+ _D3std6socket15lastSocketErrorFNdNfZAya@Base 12
+ _D3std6socket16AddressException6__initZ@Base 12
+ _D3std6socket16AddressException6__vtblZ@Base 12
+ _D3std6socket16AddressException7__ClassZ@Base 12
+ _D3std6socket16AddressException8__mixin16__ctorMFNfAyaC6object9ThrowableQvmiZCQCyQCxQCt@Base 12
+ _D3std6socket16AddressException8__mixin16__ctorMFNfAyaQdmC6object9ThrowableiZCQCyQCxQCt@Base 12
+ _D3std6socket16AddressException8__mixin16__ctorMFNfAyaiQemC6object9ThrowableZCQCyQCxQCt@Base 12
+ _D3std6socket16AddressInfoFlags6__initZ@Base 12
+ _D3std6socket16Internet6Address4addrMxFNaNbNdNiNfZG16h@Base 12
+ _D3std6socket16Internet6Address4nameMFNaNbNdNiNjNfZPS4core3sys5posixQkQCm8sockaddr@Base 12
+ _D3std6socket16Internet6Address4nameMxFNaNbNdNiNjNfZPxS4core3sys5posixQkQCo8sockaddr@Base 12
+ _D3std6socket16Internet6Address4portMxFNaNbNdNiNfZt@Base 12
+ _D3std6socket16Internet6Address5parseFNeMAxaZG16h@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNaNbNiNfG16htZCQCbQCaQBw@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNaNbNiNfS4core3sys5posix7netinet3in_12sockaddr_in6ZCQDmQDlQDh@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNaNbNiNfZCQBwQBvQBr@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNaNbNiNftZCQBxQBwQBs@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNeMAxaMQeZCQBxQBwQBs@Base 12
+ _D3std6socket16Internet6Address6__ctorMFNfMAxatZCQBvQBuQBq@Base 12
+ _D3std6socket16Internet6Address6__initZ@Base 12
+ _D3std6socket16Internet6Address6__vtblZ@Base 12
+ _D3std6socket16Internet6Address7__ClassZ@Base 12
+ _D3std6socket16Internet6Address7nameLenMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket16Internet6Address8ADDR_ANYFNaNbNcNdNiNfZxG16h@Base 12
+ _D3std6socket16wouldHaveBlockedFNbNiNfZb@Base 12
+ _D3std6socket17SocketOSException6__ctorMFNfAyaC6object9ThrowableQvmiPFNeiZQBfZCQCzQCyQCu@Base 12
+ _D3std6socket17SocketOSException6__ctorMFNfAyaQdmC6object9ThrowableiPFNeiZQBfZCQCzQCyQCu@Base 12
+ _D3std6socket17SocketOSException6__ctorMFNfAyaiPFNeiZQkQmmC6object9ThrowableZCQCyQCxQCt@Base 12
+ _D3std6socket17SocketOSException6__initZ@Base 12
+ _D3std6socket17SocketOSException6__vtblZ@Base 12
+ _D3std6socket17SocketOSException7__ClassZ@Base 12
+ _D3std6socket17SocketOptionLevel6__initZ@Base 12
+ _D3std6socket17formatSocketErrorFNeiZAya@Base 12
+ _D3std6socket18getAddressInfoImplFMAxaMQePS4core3sys5posix5netdb8addrinfoZASQCwQCv11AddressInfo@Base 12
+ _D3std6socket18getaddrinfoPointeryPUNbNiPxaQdPxS4core3sys5posix5netdb8addrinfoPPSQBhQBfQBeQBbQyZi@Base 12
+ _D3std6socket18getnameinfoPointeryPUNbNiPxS4core3sys5posixQkQCc8sockaddrkPakQdkiZi@Base 12
+ _D3std6socket19freeaddrinfoPointeryPUNbNiPS4core3sys5posix5netdb8addrinfoZv@Base 12
+ _D3std6socket21SocketAcceptException6__initZ@Base 12
+ _D3std6socket21SocketAcceptException6__vtblZ@Base 12
+ _D3std6socket21SocketAcceptException7__ClassZ@Base 12
+ _D3std6socket21SocketAcceptException8__mixin16__ctorMFNfAyaC6object9ThrowableQvmiZCQDdQDcQCy@Base 12
+ _D3std6socket21SocketAcceptException8__mixin16__ctorMFNfAyaQdmC6object9ThrowableiZCQDdQDcQCy@Base 12
+ _D3std6socket21SocketAcceptException8__mixin16__ctorMFNfAyaiQemC6object9ThrowableZCQDdQDcQCy@Base 12
+ _D3std6socket22SocketFeatureException6__initZ@Base 12
+ _D3std6socket22SocketFeatureException6__vtblZ@Base 12
+ _D3std6socket22SocketFeatureException7__ClassZ@Base 12
+ _D3std6socket22SocketFeatureException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDjQDiQDe@Base 12
+ _D3std6socket22SocketFeatureException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDjQDiQDe@Base 12
+ _D3std6socket23UnknownAddressReference4nameMFNaNbNdNiNfZPS4core3sys5posixQkQCr8sockaddr@Base 12
+ _D3std6socket23UnknownAddressReference4nameMxFNaNbNdNiNfZPxS4core3sys5posixQkQCt8sockaddr@Base 12
+ _D3std6socket23UnknownAddressReference6__ctorMFNaNbNiNfPS4core3sys5posixQkQCq8sockaddrkZCQDjQDiQDe@Base 12
+ _D3std6socket23UnknownAddressReference6__ctorMFNaNbPxS4core3sys5posixQkQCn8sockaddrkZCQDgQDfQDb@Base 12
+ _D3std6socket23UnknownAddressReference6__initZ@Base 12
+ _D3std6socket23UnknownAddressReference6__vtblZ@Base 12
+ _D3std6socket23UnknownAddressReference7__ClassZ@Base 12
+ _D3std6socket23UnknownAddressReference7nameLenMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket24SocketParameterException6__initZ@Base 12
+ _D3std6socket24SocketParameterException6__vtblZ@Base 12
+ _D3std6socket24SocketParameterException7__ClassZ@Base 12
+ _D3std6socket24SocketParameterException8__mixin16__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDlQDkQDg@Base 12
+ _D3std6socket24SocketParameterException8__mixin16__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDlQDkQDg@Base 12
+ _D3std6socket25_sharedStaticCtor_L282_C1FZv@Base 12
+ _D3std6socket25_sharedStaticDtor_L317_C1FNbNiZv@Base 12
+ _D3std6socket6Linger2onMNgFNaNbNcNdNiNjNfZNgi@Base 12
+ _D3std6socket6Linger4timeMNgFNaNbNcNdNiNjNfZNgi@Base 12
+ _D3std6socket6Linger6__initZ@Base 12
+ _D3std6socket6Socket11receiveFromMFNeAvEQBmQBl11SocketFlagsKCQChQCg7AddressZl@Base 12
+ _D3std6socket6Socket11receiveFromMFNeAvEQBmQBl11SocketFlagsZl@Base 12
+ _D3std6socket6Socket11receiveFromMFNfAvKCQBnQBm7AddressZl@Base 12
+ _D3std6socket6Socket11receiveFromMFNfAvZl@Base 12
+ _D3std6socket6Socket12getErrorTextMFNfZAya@Base 12
+ _D3std6socket6Socket12localAddressMFNdNeZCQBoQBn7Address@Base 12
+ _D3std6socket6Socket12setKeepAliveMFNeiiZv@Base 12
+ _D3std6socket6Socket13addressFamilyMFNdNfZEQBpQBo13AddressFamily@Base 12
+ _D3std6socket6Socket13createAddressMFNaNbNfZCQBrQBq7Address@Base 12
+ _D3std6socket6Socket13remoteAddressMFNdNeZCQBpQBo7Address@Base 12
+ _D3std6socket6Socket4bindMFNeCQBcQBb7AddressZv@Base 12
+ _D3std6socket6Socket4sendMFNeAxvEQBfQBe11SocketFlagsZl@Base 12
+ _D3std6socket6Socket4sendMFNfAxvZl@Base 12
+ _D3std6socket6Socket5closeMFNbNiNeZv@Base 12
+ _D3std6socket6Socket6__ctorMFNaNbNiNfEQBkQBj8socket_tEQCaQBz13AddressFamilyZCQCxQCwQCs@Base 12
+ _D3std6socket6Socket6__ctorMFNaNbNiNfZCQBlQBkQBg@Base 12
+ _D3std6socket6Socket6__ctorMFNeEQBeQBd13AddressFamilyEQCaQBz10SocketTypeEQCtQCs12ProtocolTypeZCQDpQDoQDk@Base 12
+ _D3std6socket6Socket6__ctorMFNeEQBeQBd13AddressFamilyEQCaQBz10SocketTypeMAxaZCQCyQCxQCt@Base 12
+ _D3std6socket6Socket6__ctorMFNfEQBeQBd13AddressFamilyEQCaQBz10SocketTypeZCQCuQCtQCp@Base 12
+ _D3std6socket6Socket6__ctorMFNfMxSQBgQBf11AddressInfoZCQCbQCaQBw@Base 12
+ _D3std6socket6Socket6__dtorMFNbNiNfZv@Base 12
+ _D3std6socket6Socket6__initZ@Base 12
+ _D3std6socket6Socket6__vtblZ@Base 12
+ _D3std6socket6Socket6_closeFNbNiEQBfQBe8socket_tZv@Base 12
+ _D3std6socket6Socket6acceptMFNeZCQBfQBeQBa@Base 12
+ _D3std6socket6Socket6handleMxFNaNbNdNiNfZEQBoQBn8socket_t@Base 12
+ _D3std6socket6Socket6listenMFNeiZv@Base 12
+ _D3std6socket6Socket6selectFNeCQBdQBc9SocketSetQrQtPSQBzQBy7TimeValZi@Base 12
+ _D3std6socket6Socket6selectFNeCQBdQBc9SocketSetQrQtS4core4time8DurationZi@Base 12
+ _D3std6socket6Socket6selectFNfCQBdQBc9SocketSetQrQtZi@Base 12
+ _D3std6socket6Socket6sendToMFNeAxvEQBhQBg11SocketFlagsCQCbQCa7AddressZl@Base 12
+ _D3std6socket6Socket6sendToMFNeAxvEQBhQBg11SocketFlagsZl@Base 12
+ _D3std6socket6Socket6sendToMFNfAxvCQBhQBg7AddressZl@Base 12
+ _D3std6socket6Socket6sendToMFNfAxvZl@Base 12
+ _D3std6socket6Socket7__ClassZ@Base 12
+ _D3std6socket6Socket7connectMFNeCQBfQBe7AddressZv@Base 12
+ _D3std6socket6Socket7isAliveMxFNdNeZb@Base 12
+ _D3std6socket6Socket7receiveMFNeAvEQBhQBg11SocketFlagsZl@Base 12
+ _D3std6socket6Socket7receiveMFNfAvZl@Base 12
+ _D3std6socket6Socket7setSockMFNfEQBfQBe8socket_tZv@Base 12
+ _D3std6socket6Socket8blockingMFNdNebZv@Base 12
+ _D3std6socket6Socket8blockingMxFNbNdNiNeZb@Base 12
+ _D3std6socket6Socket8capToIntFNbNiNfmZi@Base 12
+ _D3std6socket6Socket8hostNameFNdNeZAya@Base 12
+ _D3std6socket6Socket8shutdownMFNbNiNeEQBkQBj14SocketShutdownZv@Base 12
+ _D3std6socket6Socket9acceptingMFNaNbNfZCQBmQBlQBh@Base 12
+ _D3std6socket6Socket9getOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionAvZi@Base 12
+ _D3std6socket6Socket9getOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionJS4core4time8DurationZv@Base 12
+ _D3std6socket6Socket9getOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionJSQDdQDc6LingerZi@Base 12
+ _D3std6socket6Socket9getOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionJiZi@Base 12
+ _D3std6socket6Socket9setOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionAvZv@Base 12
+ _D3std6socket6Socket9setOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionS4core4time8DurationZv@Base 12
+ _D3std6socket6Socket9setOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptionSQDcQDb6LingerZv@Base 12
+ _D3std6socket6Socket9setOptionMFNeEQBhQBg17SocketOptionLevelEQChQCg12SocketOptioniZv@Base 12
+ _D3std6socket7Address10setNameLenMFNfkZv@Base 12
+ _D3std6socket7Address12toAddrStringMxFNfZAya@Base 12
+ _D3std6socket7Address12toHostStringMxFNebZAya@Base 12
+ _D3std6socket7Address12toPortStringMxFNfZAya@Base 12
+ _D3std6socket7Address13addressFamilyMxFNaNbNdNiNfZEQBxQBw13AddressFamily@Base 12
+ _D3std6socket7Address15toServiceStringMxFNebZAya@Base 12
+ _D3std6socket7Address16toHostNameStringMxFNfZAya@Base 12
+ _D3std6socket7Address19toServiceNameStringMxFNfZAya@Base 12
+ _D3std6socket7Address6__initZ@Base 12
+ _D3std6socket7Address6__vtblZ@Base 12
+ _D3std6socket7Address7__ClassZ@Base 12
+ _D3std6socket7Address8toStringMxFNfZAya@Base 12
+ _D3std6socket7Service16getServiceByNameMFNbNeMAxaMQeZb@Base 12
+ _D3std6socket7Service16getServiceByPortMFNbNetMAxaZb@Base 12
+ _D3std6socket7Service6__initZ@Base 12
+ _D3std6socket7Service6__vtblZ@Base 12
+ _D3std6socket7Service7__ClassZ@Base 12
+ _D3std6socket7Service8populateMFNaNbPS4core3sys5posix5netdb7serventZv@Base 12
+ _D3std6socket7TimeVal12microsecondsMNgFNaNbNcNdNiNjNfZNgl@Base 12
+ _D3std6socket7TimeVal6__initZ@Base 12
+ _D3std6socket7TimeVal7secondsMNgFNaNbNcNdNiNjNfZNgl@Base 12
+ _D3std6socket8Protocol17getProtocolByNameMFNbNeMAxaZb@Base 12
+ _D3std6socket8Protocol17getProtocolByTypeMFNbNeEQBuQBt12ProtocolTypeZb@Base 12
+ _D3std6socket8Protocol6__initZ@Base 12
+ _D3std6socket8Protocol6__vtblZ@Base 12
+ _D3std6socket8Protocol7__ClassZ@Base 12
+ _D3std6socket8Protocol8populateMFNaNbPS4core3sys5posix5netdb8protoentZv@Base 12
+ _D3std6socket8_lasterrFNbNiNfZi@Base 12
+ _D3std6socket8socket_t6__initZ@Base 12
+ _D3std6socket9SocketSet14setMinCapacityMFNaNbNfmZv@Base 12
+ _D3std6socket9SocketSet3addMFNaNbNeEQBiQBh8socket_tZv@Base 12
+ _D3std6socket9SocketSet3addMFNaNbNfCQBiQBh6SocketZv@Base 12
+ _D3std6socket9SocketSet3maxMxFNaNbNdNiNfZk@Base 12
+ _D3std6socket9SocketSet4maskFNaNbNiNfkZl@Base 12
+ _D3std6socket9SocketSet5isSetMxFNaNbNiNfCQBnQBm6SocketZi@Base 12
+ _D3std6socket9SocketSet5isSetMxFNaNbNiNfEQBnQBm8socket_tZi@Base 12
+ _D3std6socket9SocketSet5resetMFNaNbNiNfZv@Base 12
+ _D3std6socket9SocketSet6__ctorMFNaNbNfmZCQBnQBmQBi@Base 12
+ _D3std6socket9SocketSet6__initZ@Base 12
+ _D3std6socket9SocketSet6__vtblZ@Base 12
+ _D3std6socket9SocketSet6removeMFNaNbNfCQBlQBk6SocketZv@Base 12
+ _D3std6socket9SocketSet6removeMFNaNbNfEQBlQBk8socket_tZv@Base 12
+ _D3std6socket9SocketSet6resizeMFNaNbNfmZv@Base 12
+ _D3std6socket9SocketSet7__ClassZ@Base 12
+ _D3std6socket9SocketSet7selectnMxFNaNbNiNfZi@Base 12
+ _D3std6socket9SocketSet8capacityMxFNaNbNdNiNfZm@Base 12
+ _D3std6socket9SocketSet8toFd_setMFNaNbNiNeZPS4core3sys5posixQk6select6fd_set@Base 12
+ _D3std6socket9SocketSet9lengthForFNaNbNiNfmZm@Base 12
+ _D3std6socket9TcpSocket6__ctorMFNfCQBhQBg7AddressZCQBxQBwQBs@Base 12
+ _D3std6socket9TcpSocket6__ctorMFNfEQBhQBg13AddressFamilyZCQCeQCdQBz@Base 12
+ _D3std6socket9TcpSocket6__ctorMFNfZCQBiQBhQBd@Base 12
+ _D3std6socket9TcpSocket6__initZ@Base 12
+ _D3std6socket9TcpSocket6__vtblZ@Base 12
+ _D3std6socket9TcpSocket7__ClassZ@Base 12
+ _D3std6socket9UdpSocket6__ctorMFNfEQBhQBg13AddressFamilyZCQCeQCdQBz@Base 12
+ _D3std6socket9UdpSocket6__ctorMFNfZCQBiQBhQBd@Base 12
+ _D3std6socket9UdpSocket6__initZ@Base 12
+ _D3std6socket9UdpSocket6__vtblZ@Base 12
+ _D3std6socket9UdpSocket7__ClassZ@Base 12
+ _D3std6socket__T14getAddressInfoTAxaTEQBkQBj13AddressFamilyZQBsFNfMQBiMQBmQBlZASQDaQCz11AddressInfo@Base 12
+ _D3std6socket__T14getAddressInfoTAxaTEQBkQBj16AddressInfoFlagsZQBvFNfMQBlMQBpQBoZASQDdQDc11AddressInfo@Base 12
+ _D3std6socket__T14getAddressInfoTAxaZQvFNfMQkMQnZASQBxQBw11AddressInfo@Base 12
+ _D3std6socket__T14getAddressInfoTEQBgQBf16AddressInfoFlagsZQBrFNfMAxaQBkZASQCvQCu11AddressInfo@Base 12
+ _D3std6stdint11__moduleRefZ@Base 12
+ _D3std6stdint12__ModuleInfoZ@Base 12
+ _D3std6string11__moduleRefZ@Base 12
+ _D3std6string12__ModuleInfoZ@Base 12
+ _D3std6string14makeTransTableFNaNbNiNfMAxaMQeZG256a@Base 12
+ _D3std6string15StringException6__initZ@Base 12
+ _D3std6string15StringException6__vtblZ@Base 12
+ _D3std6string15StringException7__ClassZ@Base 12
+ _D3std6string15StringException8__mixin26__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDcQDbQCx@Base 12
+ _D3std6string15StringException8__mixin26__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDcQDbQCx@Base 12
+ _D3std6string6abbrevFNaNfAAyaZHQfQh@Base 12
+ _D3std6string7soundexFNaNbNfMAxaNkMAaZQd@Base 12
+ _D3std6string9makeTransFNaNbNeMAxaMQeZAya@Base 12
+ _D3std6string9toStringzFNaNbNeMAxaZPya@Base 12
+ _D3std6string__T10stripRightTAyaZQrFNaNbNiNfQpZQs@Base 12
+ _D3std6string__T11_indexOfStrVEQBd8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBoi1Z__TQDbTQBvTaZQDlFNaNbNiNfQCnAxaZl@Base 12
+ _D3std6string__T11lastIndexOfTaZQqFNaNiNfAxaIwIEQBu8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBoZl@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf11__xopEqualsMxFKxSQEpQEo__TQEkVQDzi0TQDjZQEyZb@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf4saveMFNaNbNdNiNfZSQEpQEo__TQEkVQDzi0TQDjZQEy@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf5frontMFNaNbNdNiNfZQCq@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf6__ctorMFNaNbNcNiNfQCqZSQEuQEt__TQEpVQEei0TQDoZQFd@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf6__initZ@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6string__T12LineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TQBqZQDf9__xtoHashFNbNeKxSQEoQEn__TQEjVQDyi0TQDiZQExZm@Base 12
+ _D3std6string__T12lineSplitterVEQBe8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBqi0TyaZQDeFNaNbNiNfQCfZSQEjQEi__T12LineSplitterVQEei0TQDoZQz@Base 12
+ _D3std6string__T12rightJustifyTAyaZQtFNaNbNfQnmwZQs@Base 12
+ _D3std6string__T14representationTxaZQuFNaNbNiNfAxaZAxh@Base 12
+ _D3std6string__T14representationTyaZQuFNaNbNiNfAyaZAyh@Base 12
+ _D3std6string__T14rightJustifierTAyaZQvFNaNbNiNfQpmwZSQCa3utf__T5byUTFTaVEQCu8typecons__T4FlagVQCka19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFxQFw__TQFsTSQGlQEl__TQEkTwVQEhi1Z__TQEzTSQHoQFo__T10byCodeUnitTQHfZQrFQHmZ14ByCodeUnitImplZQHcFNcQCfZ6ResultZQJqFQDymwZQsZQIhFNcQFbZQBf@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFNaNbNiNfQHamwZSQImQIl__TQIhTQHuZQIpFQIcmwZQCc@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs10initializeMFNaNbNiNfZv@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs11__xopEqualsMxFKxSQIyQIx__TQItTQIgZQJbFQIomwZQCoZb@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs4saveMFNaNbNdNiNfZSQIyQIx__TQItTQIgZQJbFQIomwZQCo@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs5frontMFNaNbNdNiNfZw@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs6__ctorMFNaNbNcNiNfQHtmwZSQJfQJe__TQJaTQInZQJiFQIvmwZQCv@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs6__initZ@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs8popFrontMFNaNbNiNfZv@Base 12
+ _D3std6string__T14rightJustifierTSQBg3utf__T5byUTFTwVEQCa8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFdQDx__T10byCodeUnitTQDeZQrFQDlZ14ByCodeUnitImplZQFlFNcQCfZ6ResultZQHfFQGsmwZQs9__xtoHashFNbNeKxSQIxQIw__TQIsTQIfZQJaFQInmwZQCnZm@Base 12
+ _D3std6string__T5chompTAxaZQlFNaNbNiNfQpZQs@Base 12
+ _D3std6string__T5stripTAyaZQlFNaNbNiNfQpZQs@Base 12
+ _D3std6string__T7indexOfTAyaTaZQpFNaNbNiNfQrAxaZl@Base 12
+ _D3std6string__T7indexOfTaZQlFNaNbNiNfMAxawEQBq8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBoZl@Base 12
+ _D3std6string__T8_indexOfTAxaZQoFNaNbNiNfQpwEQBr8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBoZl@Base 12
+ _D3std6string__T8_indexOfTAxaZQoFQhwEQBj8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBoZ13trustedmemchrFNaNbNiNeQDqaZl@Base 12
+ _D3std6string__T9isNumericTAxaZQpFNaNbNiNfQpbZb@Base 12
+ _D3std6string__T9isNumericTAxaZQpFQhbZ__T8asciiCmpTSQBy3utf__T10byCodeUnitTQBwZQrFQCdZ14ByCodeUnitImplZQCkFNaNbNiNfQCmAyaZb@Base 12
+ _D3std6string__T9soundexerTAxaZQpFNaNbNiNfQpZG4a@Base 12
+ _D3std6string__T9soundexerTAxaZQpFQhZ3dexyAa@Base 12
+ _D3std6string__T9stripLeftTAyaZQpFNaNbNiNfQpZQs@Base 12
+ _D3std6system11__moduleRefZ@Base 12
+ _D3std6system12__ModuleInfoZ@Base 12
+ _D3std6system2OS6__initZ@Base 12
+ _D3std6system2osyEQqQo2OS@Base 12
+ _D3std6system6endianyEQuQs6Endian@Base 12
+ _D3std6traits11__moduleRefZ@Base 12
+ _D3std6traits12__ModuleInfoZ@Base 12
+ _D3std6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D3std6traits__T18extractAttribFlagsVAyaa4_70757265VQpa7_6e6f7468726f77VQBja5_4073616665ZQCvFNaNbNiNfZEQDxQDw17FunctionAttribute@Base 12
+ _D3std6traits__T18extractAttribFlagsVAyaa7_6e6f7468726f77VQva5_406e6f6763VQBla5_4073616665ZQCxFNaNbNiNfZEQDzQDy17FunctionAttribute@Base 12
+ _D3std6traits__T18extractAttribFlagsVAyaa7_6e6f7468726f77VQva5_4073616665ZQCgFNaNbNiNfZEQDiQDh17FunctionAttribute@Base 12
+ _D3std7complex11__moduleRefZ@Base 12
+ _D3std7complex12__ModuleInfoZ@Base 12
+ _D3std7complex4expiFNaNbNiNeeZSQBdQBc__T7ComplexTeZQl@Base 12
+ _D3std7complex9coshisinhFNaNbNiNfeZSQBiQBh__T7ComplexTeZQl@Base 12
+ _D3std7complex__T7ComplexTeZQl11__xopEqualsMxFKxSQBvQBu__TQBpTeZQBvZb@Base 12
+ _D3std7complex__T7ComplexTeZQl6__initZ@Base 12
+ _D3std7complex__T7ComplexTeZQl8toStringMxFNaNfZAya@Base 12
+ _D3std7complex__T7ComplexTeZQl8toStringMxFNfZ__T19trustedAssumeUniqueTAaZQzFNaNbNiNeQoZAya@Base 12
+ _D3std7complex__T7ComplexTeZQl9__xtoHashFNbNeKxSQBuQBt__TQBoTeZQBuZm@Base 12
+ _D3std7complex__T7ComplexTeZQl__T6__ctorHTeHTeZQoMFNaNbNcNiNfxexeZSQCnQCm__TQChTeZQCn@Base 12
+ _D3std7complex__T7ComplexTeZQl__T8opEqualsHTeZQnMxFNaNbNiNfSQCgQCf__TQCaTeZQCgZb@Base 12
+ _D3std7complex__T7ComplexTeZQl__T8toStringTDFNaNbNfAxaZvTaZQBaMxFNaNfMQBbMKxSQCx6format4spec__T10FormatSpecTaZQpZv@Base 12
+ _D3std7numeric11__moduleRefZ@Base 12
+ _D3std7numeric12__ModuleInfoZ@Base 12
+ _D3std7numeric16CustomFloatFlags6__initZ@Base 12
+ _D3std7numeric18decimalToFactorialFNaNbNiNfmKG21hZm@Base 12
+ _D3std7numeric20isCorrectCustomFloatFNaNbNiNfkkEQBuQBt16CustomFloatFlagsZb@Base 12
+ _D3std7numeric3Fft4sizeMxFNdZm@Base 12
+ _D3std7numeric3Fft6__ctorMFAfZCQBdQBcQx@Base 12
+ _D3std7numeric3Fft6__ctorMFmZCQBcQBbQw@Base 12
+ _D3std7numeric3Fft6__initZ@Base 12
+ _D3std7numeric3Fft6__vtblZ@Base 12
+ _D3std7numeric3Fft7__ClassZ@Base 12
+ _D3std7numeric__T13oppositeSignsTyeTeZQvFNaNbNiNfyeeZb@Base 12
+ _D3std7numeric__T6StrideTAfZQl11__xopEqualsMxFKxSQBvQBu__TQBpTQBlZQBxZb@Base 12
+ _D3std7numeric__T6StrideTAfZQl11doubleStepsMFNaNbNiNfZv@Base 12
+ _D3std7numeric__T6StrideTAfZQl4saveMFNaNbNdNiNfZSQBvQBu__TQBpTQBlZQBx@Base 12
+ _D3std7numeric__T6StrideTAfZQl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std7numeric__T6StrideTAfZQl5frontMFNaNbNdNiNfZf@Base 12
+ _D3std7numeric__T6StrideTAfZQl6__ctorMFNaNbNcNiNfQymZSQCaQBz__TQBuTQBqZQCc@Base 12
+ _D3std7numeric__T6StrideTAfZQl6__initZ@Base 12
+ _D3std7numeric__T6StrideTAfZQl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std7numeric__T6StrideTAfZQl6nStepsMFNaNbNdNiNfmZm@Base 12
+ _D3std7numeric__T6StrideTAfZQl6nStepsMxFNaNbNdNiNfZm@Base 12
+ _D3std7numeric__T6StrideTAfZQl7opIndexMFNaNbNiNfmZf@Base 12
+ _D3std7numeric__T6StrideTAfZQl7popHalfMFNaNbNiNfZv@Base 12
+ _D3std7numeric__T6StrideTAfZQl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std7numeric__T6StrideTAfZQl9__xtoHashFNbNeKxSQBuQBt__TQBoTQBkZQBwZm@Base 12
+ _D3std7numeric__T8findRootTeTDFNaNbNiNfeZeTPFNaNbNiNfeeZbZQBpFNaNbNiNfMQBqxexeMQBkZe@Base 12
+ _D3std7numeric__T8findRootTeTDFNaNbNiNfeZeZQBaFMQtxexeZ9__lambda4FNaNbNiNfeeZb@Base 12
+ _D3std7numeric__T8findRootTeTDFNaNbNiNfeZeZQBaFNaNbNiNfMQBbxexeZe@Base 12
+ _D3std7numeric__T8findRootTeTeTDFNaNbNiNfeZeTPFNaNbNiNfeeZbZQBrFMQBixexexexeMQBgZ18secant_interpolateFNaNbNiNfeeeeZe@Base 12
+ _D3std7numeric__T8findRootTeTeTDFNaNbNiNfeZeTPFNaNbNiNfeeZbZQBrFNaNbNiNfMQBqxexexexeMQBoZSQDk8typecons__T5TupleTeTeTeTeZQp@Base 12
+ _D3std7process10setCLOEXECFNbNiibZv@Base 12
+ _D3std7process10spawnShellFNeMAxaMxHAyaAyaSQBpQBo6ConfigMQBbMQzZCQClQCk3Pid@Base 12
+ _D3std7process10spawnShellFNeMAxaSQBg5stdio4FileQpQrMxHAyaAyaSQCiQCh6ConfigMQBuMQzZCQDeQDd3Pid@Base 12
+ _D3std7process10toAStringzFIAAyaPPxaZv@Base 12
+ _D3std7process11__moduleRefZ@Base 12
+ _D3std7process11environment13opIndexAssignFNeNkMNgAaMAxaZANga@Base 12
+ _D3std7process11environment3getFNfMAxaAyaZQe@Base 12
+ _D3std7process11environment4toAAFNeZHAyaQd@Base 12
+ _D3std7process11environment6__initZ@Base 12
+ _D3std7process11environment6__vtblZ@Base 12
+ _D3std7process11environment6removeFNbNiNeMAxaZv@Base 12
+ _D3std7process11environment7__ClassZ@Base 12
+ _D3std7process11environment7getImplFNeMAxaMDFNfQiZvZv@Base 12
+ _D3std7process11environment7opIndexFNfMAxaZAya@Base 12
+ _D3std7process11environment__T14cachedToStringTaZQtFNbNfMAxaZAya@Base 12
+ _D3std7process11environment__T14cachedToStringTaZQtFNfMAxaZ10lastResultAya@Base 12
+ _D3std7process11nativeShellFNaNbNdNiNfZAya@Base 12
+ _D3std7process11pipeProcessFNfMAxAaEQBiQBh8RedirectxHAyaAyaSQCgQCf6ConfigMAxaZSQCzQCy12ProcessPipes@Base 12
+ _D3std7process11pipeProcessFNfMAxaEQBhQBg8RedirectxHAyaAyaSQCfQCe6ConfigMQBqZSQCyQCx12ProcessPipes@Base 12
+ _D3std7process11shellSwitchyAa@Base 12
+ _D3std7process12ProcessPipes11__fieldDtorMFNeZv@Base 12
+ _D3std7process12ProcessPipes11__xopEqualsMxFKxSQBtQBsQBnZb@Base 12
+ _D3std7process12ProcessPipes15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std7process12ProcessPipes3pidMFNbNdNfZCQBoQBn3Pid@Base 12
+ _D3std7process12ProcessPipes5stdinMFNbNdNfZSQBq5stdio4File@Base 12
+ _D3std7process12ProcessPipes6__initZ@Base 12
+ _D3std7process12ProcessPipes6stderrMFNbNdNfZSQBr5stdio4File@Base 12
+ _D3std7process12ProcessPipes6stdoutMFNbNdNfZSQBr5stdio4File@Base 12
+ _D3std7process12ProcessPipes8opAssignMFNcNjNeSQBsQBrQBmZQl@Base 12
+ _D3std7process12ProcessPipes9__xtoHashFNbNeKxSQBsQBrQBmZm@Base 12
+ _D3std7process12__ModuleInfoZ@Base 12
+ _D3std7process12executeShellFNfMAxaxHAyaAyaSQBqQBp6ConfigmMQBbQzZSQCm8typecons__T5TupleTiVQCba6_737461747573TQCuVQCya6_6f7574707574ZQBz@Base 12
+ _D3std7process12spawnProcessFNeMAxAaxHAyaAyaSQBrQBq6ConfigMAxaZCQCkQCj3Pid@Base 12
+ _D3std7process12spawnProcessFNeMAxaSQBi5stdio4FileQpQrxHAyaAyaSQCjQCi6ConfigMQBtZCQDcQDb3Pid@Base 12
+ _D3std7process12spawnProcessFNeMAxaxHAyaAyaSQBqQBp6ConfigMQBaZCQCjQCi3Pid@Base 12
+ _D3std7process12spawnProcessFNfMAxAaSQBj5stdio4FileQpQrxHAyaAyaSQCkQCj6ConfigMxQBtZCQDeQDd3Pid@Base 12
+ _D3std7process12thisThreadIDFNbNdNeZm@Base 12
+ _D3std7process13charAllocatorFNaNbNfmZAa@Base 12
+ _D3std7process13getEnvironPtrFNeZxPPa@Base 12
+ _D3std7process13searchPathForFNfMAxaZAya@Base 12
+ _D3std7process13thisProcessIDFNbNdNeZi@Base 12
+ _D3std7process14uniqueTempPathFNfZAya@Base 12
+ _D3std7process16ProcessException12newFromErrnoFAyaQdmZCQCbQCaQBv@Base 12
+ _D3std7process16ProcessException12newFromErrnoFiAyaQdmZCQCcQCbQBw@Base 12
+ _D3std7process16ProcessException6__initZ@Base 12
+ _D3std7process16ProcessException6__vtblZ@Base 12
+ _D3std7process16ProcessException7__ClassZ@Base 12
+ _D3std7process16ProcessException8__mixin36__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDeQDdQCy@Base 12
+ _D3std7process16ProcessException8__mixin36__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDeQDdQCy@Base 12
+ _D3std7process17spawnProcessPosixFNeMAxAaSQBo5stdio4FileQpQrMxHAyaAyaSQCqQCp6ConfigMAxaZ12abortOnErrorFNbNiiEQEdQEc13InternalErroriZv@Base 12
+ _D3std7process17spawnProcessPosixFNeMAxAaSQBo5stdio4FileQpQrMxHAyaAyaSQCqQCp6ConfigMAxaZ5getFDFNfKQCfZi@Base 12
+ _D3std7process17spawnProcessPosixFNeMAxAaSQBo5stdio4FileQpQrMxHAyaAyaSQCqQCp6ConfigMAxaZCQDjQDi3Pid@Base 12
+ _D3std7process18escapeShellCommandFNaNfMAxAaXAya@Base 12
+ _D3std7process19escapePosixArgumentFNaNbNeMAxaZAya@Base 12
+ _D3std7process19escapeShellFileNameFNaNbNeMAxaZAya@Base 12
+ _D3std7process20escapeShellArgumentsFNaNbNeMAxAaX9allocatorMFNaNbNfmZAa@Base 12
+ _D3std7process20escapeShellArgumentsFNaNbNeMAxAaXAya@Base 12
+ _D3std7process21escapeWindowsArgumentFNaNbNeMAxaZAya@Base 12
+ _D3std7process24escapeShellCommandStringFNaNfNkMAyaZQe@Base 12
+ _D3std7process25escapeWindowsShellCommandFNaNfMAxaZAya@Base 12
+ _D3std7process3Pid11performWaitMFNebZi@Base 12
+ _D3std7process3Pid6__ctorMFNaNbNfibZCQBjQBiQBd@Base 12
+ _D3std7process3Pid6__initZ@Base 12
+ _D3std7process3Pid6__vtblZ@Base 12
+ _D3std7process3Pid7__ClassZ@Base 12
+ _D3std7process3Pid8osHandleMFNaNbNdNiNfZi@Base 12
+ _D3std7process3Pid9processIDMxFNaNbNdNfZi@Base 12
+ _D3std7process4Pipe11__fieldDtorMFNeZv@Base 12
+ _D3std7process4Pipe11__xopEqualsMxFKxSQBkQBjQBeZb@Base 12
+ _D3std7process4Pipe15__fieldPostblitMFNbNlNeZv@Base 12
+ _D3std7process4Pipe5closeMFNfZv@Base 12
+ _D3std7process4Pipe6__initZ@Base 12
+ _D3std7process4Pipe7readEndMFNbNdNfZSQBj5stdio4File@Base 12
+ _D3std7process4Pipe8opAssignMFNcNjNeSQBjQBiQBdZQl@Base 12
+ _D3std7process4Pipe8writeEndMFNbNdNfZSQBk5stdio4File@Base 12
+ _D3std7process4Pipe9__xtoHashFNbNeKxSQBjQBiQBdZm@Base 12
+ _D3std7process4killFCQtQr3PidZv@Base 12
+ _D3std7process4killFCQtQr3PidiZv@Base 12
+ _D3std7process4pipeFNeZSQwQu4Pipe@Base 12
+ _D3std7process4waitFNfCQvQt3PidZi@Base 12
+ _D3std7process5execvFIAyaIAQfZi@Base 12
+ _D3std7process6Config6__initZ@Base 12
+ _D3std7process6browseFNbNiNfMAxaZ9__lambda4FNbNiNeZPa@Base 12
+ _D3std7process6browseFNbNiNfMAxaZv@Base 12
+ _D3std7process6execv_FIAyaIAQfZi@Base 12
+ _D3std7process6execveFIAyaIAQfIQeZi@Base 12
+ _D3std7process6execvpFIAyaIAQfZi@Base 12
+ _D3std7process7executeFNfMAxAaxHAyaAyaSQBlQBk6ConfigmMAxaZSQCf8typecons__T5TupleTiVQBza6_737461747573TQCsVQCwa6_6f7574707574ZQBz@Base 12
+ _D3std7process7executeFNfMAxaxHAyaAyaSQBkQBj6ConfigmMQBbZSQCe8typecons__T5TupleTiVQBza6_737461747573TQCsVQCwa6_6f7574707574ZQBz@Base 12
+ _D3std7process7execve_FIAyaIAQfIQeZi@Base 12
+ _D3std7process7execvp_FIAyaIAQfZi@Base 12
+ _D3std7process7execvpeFIAyaIAQfIQeZi@Base 12
+ _D3std7process7tryWaitFNfCQyQw3PidZSQBi8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg@Base 12
+ _D3std7process8Redirect6__initZ@Base 12
+ _D3std7process8execvpe_FIAyaIAQfIQeZi@Base 12
+ _D3std7process9createEnvFxHAyaAyabZPxPa@Base 12
+ _D3std7process9pipeShellFNfMAxaEQBeQBd8RedirectxHAyaAyaSQCcQCb6ConfigMQBqQyZSQCxQCw12ProcessPipes@Base 12
+ _D3std7process9userShellFNdNfZAya@Base 12
+ _D3std7process__T11executeImplSQBdQBc11pipeProcessTAxAaZQBnFNeQlxHAyaAyaSQCtQCs6ConfigmMAxaZSQDn8typecons__T5TupleTiVQBza6_737461747573TQCsVQCwa6_6f7574707574ZQBz@Base 12
+ _D3std7process__T11executeImplSQBdQBc11pipeProcessTAxaZQBmFNeQkxHAyaAyaSQCsQCr6ConfigmMQBkZSQDm8typecons__T5TupleTiVQBza6_737461747573TQCsVQCwa6_6f7574707574ZQBz@Base 12
+ _D3std7process__T11executeImplS_DQBfQBe9pipeShellFNfMAxaEQCdQCc8RedirectxHAyaAyaSQDbQDa6ConfigMQBqQyZSQDwQDv12ProcessPipesTQCsTQCbZQEkFNeQDgxQCqQCmmMQDsQDaZSQFz8typecons__T5TupleTiVQEda6_737461747573TQEwVQFaa6_6f7574707574ZQBz@Base 12
+ _D3std7process__T12isExecutableTSQBf5range__T5chainTSQBz3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQDyQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFkFNbNiNeQFfZb@Base 12
+ _D3std7process__T15pipeProcessImplSQBhQBg10spawnShellTAxaTAyaZQBtFNeMQpEQCsQCr8RedirectxHQBfAyaSQDqQDp6ConfigMQCeQCdZSQEmQEl12ProcessPipes@Base 12
+ _D3std7process__T15pipeProcessImplSQBhQBg12spawnProcessTAxAaZQBsFNeMQmEQCrQCq8RedirectxHAyaAyaSQDpQDo6ConfigMAxaZSQEiQEh12ProcessPipes@Base 12
+ _D3std7process__T15pipeProcessImplSQBhQBg12spawnProcessTAxaZQBrFNeMQlEQCqQCp8RedirectxHAyaAyaSQDoQDn6ConfigMQCaZSQEhQEg12ProcessPipes@Base 12
+ _D3std7process__T23escapePosixArgumentImplS_DQBrQBq13charAllocatorFNaNbNfmZAaZQCjFNaNbNfMAxaZQs@Base 12
+ _D3std7process__T25escapeWindowsArgumentImplS_DQBtQBs13charAllocatorFNaNbNfmZAaZQClFNaNbNfMAxaZQs@Base 12
+ _D3std7signals11__moduleRefZ@Base 12
+ _D3std7signals12__ModuleInfoZ@Base 12
+ _D3std7signals6linkinFZv@Base 12
+ _D3std7sumtype11__moduleRefZ@Base 12
+ _D3std7sumtype12__ModuleInfoZ@Base 12
+ _D3std7sumtype14MatchException6__ctorMFNaNbNiNfAyaQdmZCQCbQCaQBv@Base 12
+ _D3std7sumtype14MatchException6__initZ@Base 12
+ _D3std7sumtype14MatchException6__vtblZ@Base 12
+ _D3std7sumtype14MatchException7__ClassZ@Base 12
+ _D3std7sumtype4This6__initZ@Base 12
+ _D3std7variant11__moduleRefZ@Base 12
+ _D3std7variant12__ModuleInfoZ@Base 12
+ _D3std7variant15FakeComplexReal11__xopEqualsMxFKxSQBwQBvQBqZb@Base 12
+ _D3std7variant15FakeComplexReal6__initZ@Base 12
+ _D3std7variant15FakeComplexReal9__xtoHashFNbNeKxSQBvQBuQBpZm@Base 12
+ _D3std7variant16VariantException6__ctorMFAyaZCQBsQBrQBm@Base 12
+ _D3std7variant16VariantException6__ctorMFC8TypeInfoQkZCQCbQCaQBv@Base 12
+ _D3std7variant16VariantException6__initZ@Base 12
+ _D3std7variant16VariantException6__vtblZ@Base 12
+ _D3std7variant16VariantException7__ClassZ@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp10__postblitMFZv@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp11SizeChecker6__initZ@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp11__xopEqualsMxFKxSQBzQBy__TQBtVmi32ZQCcZb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp4typeMxFNbNdNeZC8TypeInfo@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp5opCmpMxFKxSQBsQBr__TQBmVmi32ZQBvZi@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp6__dtorMFNfZv@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp6__initZ@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp6lengthMFNdZm@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp6toHashMxFNbNfZm@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp8hasValueMxFNaNbNdNiNfZb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp8toStringMFZAya@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T10convertsToTSQBx11concurrency3TidZQBjMxFNdZb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T3getTSQBp11concurrency3TidZQBbMNgFNdZNgSQCxQBiQy@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T4peekTvZQiMNgFNbNdNfZPNgv@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T5opCmpTSQBrQBq__TQBlVmi32ZQBuZQBeMFQBcZi@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T6__ctorTSQBs11concurrency3TidZQBeMFNcQBdZSQCzQCy__TQCtVmi32ZQDc@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T7handlerHTvZQmFEQBzQBy__TQBtVmi32ZQCc4OpIDPG32hPvZl@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T7handlerTSQBt11concurrency3TidZQBfFEQCtQCs__TQCnVmi32ZQCw4OpIDPG32hPvZ10tryPuttingFPQCxC8TypeInfoQBeZb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T7handlerTSQBt11concurrency3TidZQBfFEQCtQCs__TQCnVmi32ZQCw4OpIDPG32hPvZ6getPtrFNaNbNiQrZPQDb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T7handlerTSQBt11concurrency3TidZQBfFEQCtQCs__TQCnVmi32ZQCw4OpIDPG32hPvZ7compareFPQCtQeQByZl@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T7handlerTSQBt11concurrency3TidZQBfFEQCtQCs__TQCnVmi32ZQCw4OpIDPG32hPvZl@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T8opAssignTSQBu11concurrency3TidZQBgMFQBbZSQCzQCy__TQCtVmi32ZQDc@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T8opEqualsTSQBuQBt__TQBoVmi32ZQBxZQBhMxFKQBeZb@Base 12
+ _D3std7variant__T8VariantNVmi32ZQp__T8opEqualsTxSQBvQBu__TQBpVmi32ZQByZQBiMxFKxQBfZb@Base 12
+ _D3std7windows7charset11__moduleRefZ@Base 12
+ _D3std7windows7charset12__ModuleInfoZ@Base 12
+ _D3std7windows8registry11__moduleRefZ@Base 12
+ _D3std7windows8registry12__ModuleInfoZ@Base 12
+ _D3std7windows8syserror11__moduleRefZ@Base 12
+ _D3std7windows8syserror12__ModuleInfoZ@Base 12
+ _D3std8bitmanip10myToStringFNaNfmZAya@Base 12
+ _D3std8bitmanip11__moduleRefZ@Base 12
+ _D3std8bitmanip12__ModuleInfoZ@Base 12
+ _D3std8bitmanip15getBitsForAlignFmZm@Base 12
+ _D3std8bitmanip8BitArray13opIndexAssignMFNaNbNibmZb@Base 12
+ _D3std8bitmanip8BitArray13opSliceAssignMFNaNbNibZv@Base 12
+ _D3std8bitmanip8BitArray13opSliceAssignMFNaNbNibmmZv@Base 12
+ _D3std8bitmanip8BitArray3dimMxFNaNbNdNiNfZm@Base 12
+ _D3std8bitmanip8BitArray3dupMxFNaNbNdZSQBlQBkQBe@Base 12
+ _D3std8bitmanip8BitArray4flipMFNaNbNiZv@Base 12
+ _D3std8bitmanip8BitArray4flipMFNaNbNimZv@Base 12
+ _D3std8bitmanip8BitArray4sortMFNaNbNdNiNjZSQBpQBoQBi@Base 12
+ _D3std8bitmanip8BitArray5countMxFNaNbNiZm@Base 12
+ _D3std8bitmanip8BitArray5opCmpMxFNaNbNiSQBmQBlQBfZi@Base 12
+ _D3std8bitmanip8BitArray6__ctorMFNaNbNcIAbZSQBqQBpQBj@Base 12
+ _D3std8bitmanip8BitArray6__ctorMFNaNbNcNiAvmZSQBsQBrQBl@Base 12
+ _D3std8bitmanip8BitArray6__ctorMFNaNbNcNimPmZSQBsQBrQBl@Base 12
+ _D3std8bitmanip8BitArray6__initZ@Base 12
+ _D3std8bitmanip8BitArray6lengthMFNaNbNdmZm@Base 12
+ _D3std8bitmanip8BitArray6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8bitmanip8BitArray6toHashMxFNaNbNiZm@Base 12
+ _D3std8bitmanip8BitArray7bitsSetMxFNaNbNdZSQBp5range__T5chainTSQCj9algorithm9iteration__T6joinerTSQDsQBjQBc__T9MapResultSQEpQEoQEiQEcMxFNbNdZ9__lambda2TSQFvQDmQDf__T12FilterResultSQGwQGvQGpQGjMxFNbNdZ9__lambda1TSQIcQGn__T4iotaTmTmZQkFmmZ6ResultZQDcZQFjZQGiFQGeZQyTSQKdQHuQHn__TQEiSQKtQKsQKmQKgMxFNbNdZ9__lambda3TSQLzQKk__TQDxTmTxmZQEgFmxmZQDyZQGwZQLgFQLdQDmZQEr@Base 12
+ _D3std8bitmanip8BitArray7endBitsMxFNaNbNdNiZm@Base 12
+ _D3std8bitmanip8BitArray7endMaskMxFNaNbNdNiZm@Base 12
+ _D3std8bitmanip8BitArray7opApplyMFMDFKbZiZi@Base 12
+ _D3std8bitmanip8BitArray7opApplyMFMDFmKbZiZi@Base 12
+ _D3std8bitmanip8BitArray7opApplyMxFMDFbZiZi@Base 12
+ _D3std8bitmanip8BitArray7opApplyMxFMDFmbZiZi@Base 12
+ _D3std8bitmanip8BitArray7opIndexMxFNaNbNimZb@Base 12
+ _D3std8bitmanip8BitArray7reverseMFNaNbNdNiNjZSQBsQBrQBl@Base 12
+ _D3std8bitmanip8BitArray8lenToDimFNaNbNiNfmZm@Base 12
+ _D3std8bitmanip8BitArray8opEqualsMxFNaNbNiKxSQBrQBqQBkZb@Base 12
+ _D3std8bitmanip8BitArray9fullWordsMxFNaNbNdNiZm@Base 12
+ _D3std8bitmanip__T10swapEndianTaZQpFNaNbNiNfxaZa@Base 12
+ _D3std8bitmanip__T10swapEndianTbZQpFNaNbNiNfxbZb@Base 12
+ _D3std8bitmanip__T10swapEndianThZQpFNaNbNiNfxhZh@Base 12
+ _D3std8bitmanip__T10swapEndianTiZQpFNaNbNiNfxiZi@Base 12
+ _D3std8bitmanip__T10swapEndianTlZQpFNaNbNiNfxlZl@Base 12
+ _D3std8bitmanip__T12countBitsSetTmZQrFNaNbNiNfxmZk@Base 12
+ _D3std8bitmanip__T13EndianSwapperTaZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTbZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperThZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTiZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTkZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTlZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTmZQs6__initZ@Base 12
+ _D3std8bitmanip__T13EndianSwapperTtZQs6__initZ@Base 12
+ _D3std8bitmanip__T17bigEndianToNativeTaVmi1ZQBaFNaNbNiNfG1hZa@Base 12
+ _D3std8bitmanip__T17bigEndianToNativeTbVmi1ZQBaFNaNbNiNfG1hZb@Base 12
+ _D3std8bitmanip__T17bigEndianToNativeThVmi1ZQBaFNaNbNiNfG1hZh@Base 12
+ _D3std8bitmanip__T17bigEndianToNativeTiVmi4ZQBaFNaNbNiNfG4hZi@Base 12
+ _D3std8bitmanip__T17bigEndianToNativeTlVmi8ZQBaFNaNbNiNfG8hZl@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi0TkVmi4ZQBfFNaNbNiNfG4hZk@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi0TmVmi8ZQBfFNaNbNiNfG8hZm@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi0TtVmi2ZQBfFNaNbNiNfG2hZt@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi1TaVmi1ZQBfFNaNbNiNfG1hZa@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi1TbVmi1ZQBfFNaNbNiNfG1hZb@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi1ThVmi1ZQBfFNaNbNiNfG1hZh@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi1TiVmi4ZQBfFNaNbNiNfG4hZi@Base 12
+ _D3std8bitmanip__T18endianToNativeImplVbi1TlVmi8ZQBfFNaNbNiNfG8hZl@Base 12
+ _D3std8bitmanip__T18nativeToEndianImplVbi0TkZQBbFNaNbNiNfxkZG4h@Base 12
+ _D3std8bitmanip__T18nativeToEndianImplVbi0TmZQBbFNaNbNiNfxmZG8h@Base 12
+ _D3std8bitmanip__T18nativeToEndianImplVbi0TtZQBbFNaNbNiNfxtZG2h@Base 12
+ _D3std8bitmanip__T20littleEndianToNativeTkVmi4ZQBdFNaNbNiNfG4hZk@Base 12
+ _D3std8bitmanip__T20littleEndianToNativeTmVmi8ZQBdFNaNbNiNfG8hZm@Base 12
+ _D3std8bitmanip__T20littleEndianToNativeTtVmi2ZQBdFNaNbNiNfG2hZt@Base 12
+ _D3std8bitmanip__T20nativeToLittleEndianTkZQzFNaNbNiNfxkZG4h@Base 12
+ _D3std8bitmanip__T20nativeToLittleEndianTmZQzFNaNbNiNfxmZG8h@Base 12
+ _D3std8bitmanip__T20nativeToLittleEndianTtZQzFNaNbNiNfxtZG2h@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg4signMFNaNbNdNiNfbZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg4signMxFNaNbNdNiNfZb@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg6__initZ@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg8exponentMFNaNbNdNiNftZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg8exponentMxFNaNbNdNiNfZt@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg8fractionMFNaNbNdNiNfmZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTdZQBg8fractionMxFNaNbNdNiNfZm@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg4signMFNaNbNdNiNfbZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg4signMxFNaNbNdNiNfZb@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg6__initZ@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg8exponentMFNaNbNdNiNfhZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg8exponentMxFNaNbNdNiNfZh@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg8fractionMFNaNbNdNiNfkZv@Base 12
+ _D3std8bitmanip__T27FloatingPointRepresentationTfZQBg8fractionMxFNaNbNdNiNfZk@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl4saveMxFNaNbNdNiNfZSQBxQBw__TQBqTmZQBw@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl5frontMxFNaNbNdNiNfZm@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl6__ctorMFNaNbNcNiNfmmZSQCaQBz__TQBtTmZQBz@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl6__initZ@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8bitmanip__T7BitsSetTmZQl8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8bitmanip__T8ctfeReadTaZQmFNaNbNiNfxG1hZa@Base 12
+ _D3std8bitmanip__T8ctfeReadTbZQmFNaNbNiNfxG1hZb@Base 12
+ _D3std8bitmanip__T8ctfeReadThZQmFNaNbNiNfxG1hZh@Base 12
+ _D3std8bitmanip__T8ctfeReadTiZQmFNaNbNiNfxG4hZi@Base 12
+ _D3std8bitmanip__T8ctfeReadTkZQmFNaNbNiNfxG4hZk@Base 12
+ _D3std8bitmanip__T8ctfeReadTlZQmFNaNbNiNfxG8hZl@Base 12
+ _D3std8bitmanip__T8ctfeReadTmZQmFNaNbNiNfxG8hZm@Base 12
+ _D3std8bitmanip__T8ctfeReadTtZQmFNaNbNiNfxG2hZt@Base 12
+ _D3std8bitmanip__T9bitfieldsTbVAyaa13_6361736553656e736974697665Vii1TbVQBoa8_62756e646c696e67Vii1TbVQCra11_706173735468726f756768Vii1TbVQEba20_73746f704f6e46697273744e6f6e4f7074696f6eVii1TbVQGda16_6b656570456e644f664f7074696f6e73Vii1TbVQHxa8_7265717569726564Vii1ThVQJaa0_Vii2ZQJyFNaNbNiNfZQJy@Base 12
+ _D3std8bitmanip__T9bitfieldsTbVAyaa6_666c44617368Vii1TbVQza6_666c5a65726fVii1TbVQBxa7_666c5370616365Vii1TbVQCya6_666c506c7573Vii1TbVQDxa6_666c48617368Vii1TbVQEwa7_666c457175616cVii1TbVQFxa11_666c536570617261746f72Vii1ThVQHha0_Vii1ZQIfFNaNbNiNfZQIf@Base 12
+ _D3std8bitmanip__T9bitfieldsTkVAyaa8_6672616374696f6eVki23ThVQBea8_6578706f6e656e74Vki8TbVQCha4_7369676eVki1ZQDnFNaNbNiNfZQDn@Base 12
+ _D3std8bitmanip__T9bitfieldsTmVAyaa8_6672616374696f6eVki52TtVQBea8_6578706f6e656e74Vki11TbVQCia4_7369676eVki1ZQDoFNaNbNiNfZQDo@Base 12
+ _D3std8bitmanip__T9ctfeBytesTkZQnFNaNbNiNfxkZG4h@Base 12
+ _D3std8bitmanip__T9ctfeBytesTmZQnFNaNbNiNfxmZG8h@Base 12
+ _D3std8bitmanip__T9ctfeBytesTtZQnFNaNbNiNfxtZG2h@Base 12
+ _D3std8compiler11__moduleRefZ@Base 12
+ _D3std8compiler12__ModuleInfoZ@Base 12
+ _D3std8compiler13version_majoryk@Base 12
+ _D3std8compiler13version_minoryk@Base 12
+ _D3std8compiler4nameyAa@Base 12
+ _D3std8compiler6vendoryEQwQu6Vendor@Base 12
+ _D3std8compiler7D_majoryk@Base 12
+ _D3std8compiler7D_minoryk@Base 12
+ _D3std8datetime11__moduleRefZ@Base 12
+ _D3std8datetime12__ModuleInfoZ@Base 12
+ _D3std8datetime4date11__moduleRefZ@Base 12
+ _D3std8datetime4date11_monthNamesyG12Aa@Base 12
+ _D3std8datetime4date11lastDayLeapyG13i@Base 12
+ _D3std8datetime4date11timeStringsyAAa@Base 12
+ _D3std8datetime4date12__ModuleInfoZ@Base 12
+ _D3std8datetime4date12cmpTimeUnitsFNaNfAyaQdZi@Base 12
+ _D3std8datetime4date12getDayOfWeekFNaNbNiNfiZEQBsQBrQBl9DayOfWeek@Base 12
+ _D3std8datetime4date13monthToStringFNaNfEQBnQBmQBg5MonthZAya@Base 12
+ _D3std8datetime4date13monthsToMonthFNaNfiiZi@Base 12
+ _D3std8datetime4date14lastDayNonLeapyG13i@Base 12
+ _D3std8datetime4date14validTimeUnitsFNaNbNiNfAAyaXb@Base 12
+ _D3std8datetime4date14yearIsLeapYearFNaNbNiNfiZb@Base 12
+ _D3std8datetime4date15daysToDayOfWeekFNaNbNiNfEQBtQBsQBm9DayOfWeekQuZi@Base 12
+ _D3std8datetime4date16cmpTimeUnitsCTFEFNaNbNiNfAyaQdZi@Base 12
+ _D3std8datetime4date4Date10diffMonthsMxFNaNbNiNfSQBvQBuQBoQBmZi@Base 12
+ _D3std8datetime4date4Date10endOfMonthMxFNaNbNdNfZSQBwQBvQBpQBn@Base 12
+ _D3std8datetime4date4Date10isLeapYearMxFNaNbNdNiNfZb@Base 12
+ _D3std8datetime4date4Date11__invariantMxFNaNfZv@Base 12
+ _D3std8datetime4date4Date11daysInMonthMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date4Date11fromISOWeekFNaNbNiNfshEQBwQBvQBp9DayOfWeekZSQCrQCqQCkQCi@Base 12
+ _D3std8datetime4date4Date11isoWeekYearMxFNaNbNdNfZs@Base 12
+ _D3std8datetime4date4Date11toISOStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date4Date12__invariant0MxFNaNfZv@Base 12
+ _D3std8datetime4date4Date12modJulianDayMxFNaNbNdNiNfZl@Base 12
+ _D3std8datetime4date4Date14isoWeekAndYearMxFNaNbNdNfZ14ISOWeekAndYear6__initZ@Base 12
+ _D3std8datetime4date4Date14isoWeekAndYearMxFNaNbNdNfZSQCaQBzQBtQBrQBpMxFNaNbNdNfZ14ISOWeekAndYear@Base 12
+ _D3std8datetime4date4Date14toISOExtStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date4Date14toSimpleStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date4Date17dayOfGregorianCalMFNaNbNdNiNfiZv@Base 12
+ _D3std8datetime4date4Date17dayOfGregorianCalMxFNaNbNdNiNfZi@Base 12
+ _D3std8datetime4date4Date3dayMFNaNdNfiZv@Base 12
+ _D3std8datetime4date4Date3dayMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date4Date3maxFNaNbNdNiNfZSQBoQBnQBhQBf@Base 12
+ _D3std8datetime4date4Date3minFNaNbNdNiNfZSQBoQBnQBhQBf@Base 12
+ _D3std8datetime4date4Date4isADMxFNaNbNdNiNfZb@Base 12
+ _D3std8datetime4date4Date4yearMFNaNdNfiZv@Base 12
+ _D3std8datetime4date4Date4yearMxFNaNbNdNiNfZs@Base 12
+ _D3std8datetime4date4Date5monthMFNaNdNfEQBmQBlQBf5MonthZv@Base 12
+ _D3std8datetime4date4Date5monthMxFNaNbNdNiNfZEQBsQBrQBl5Month@Base 12
+ _D3std8datetime4date4Date5opCmpMxFNaNbNiNfSQBpQBoQBiQBgZi@Base 12
+ _D3std8datetime4date4Date6__ctorMFNaNbNcNiNfiZSQBtQBsQBmQBk@Base 12
+ _D3std8datetime4date4Date6__ctorMFNaNcNfiiiZSQBrQBqQBkQBi@Base 12
+ _D3std8datetime4date4Date6__initZ@Base 12
+ _D3std8datetime4date4Date6_validFNaNbNiNfiiiZb@Base 12
+ _D3std8datetime4date4Date6yearBCMFNaNdNfiZv@Base 12
+ _D3std8datetime4date4Date6yearBCMxFNaNdNfZt@Base 12
+ _D3std8datetime4date4Date7isoWeekMxFNaNbNdNfZh@Base 12
+ _D3std8datetime4date4Date8__xopCmpMxFKxSQBmQBlQBfQBdZi@Base 12
+ _D3std8datetime4date4Date8_addDaysMFNaNbNcNiNjNflZSQBxQBwQBqQBo@Base 12
+ _D3std8datetime4date4Date8toStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date4Date9dayOfWeekMxFNaNbNdNiNfZEQBwQBvQBp9DayOfWeek@Base 12
+ _D3std8datetime4date4Date9dayOfYearMFNaNdNfiZv@Base 12
+ _D3std8datetime4date4Date9dayOfYearMxFNaNbNdNiNfZt@Base 12
+ _D3std8datetime4date4Date9julianDayMxFNaNbNdNiNfZl@Base 12
+ _D3std8datetime4date4Date__T11toISOStringTSQBp5array__T8AppenderTAyaZQoZQBsMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date4Date__T12setDayOfYearVbi0ZQtMFNaNbNiNfiZv@Base 12
+ _D3std8datetime4date4Date__T12setDayOfYearVbi1ZQtMFNaNfiZv@Base 12
+ _D3std8datetime4date4Date__T14toISOExtStringTSQBs5array__T8AppenderTAyaZQoZQBvMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date4Date__T14toSimpleStringTSQBs5array__T8AppenderTAyaZQoZQBvMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date4Date__T8opBinaryVAyaa1_2dZQtMxFNaNbNiNfSQChQCgQCaQByZS4core4time8Duration@Base 12
+ _D3std8datetime4date5Month6__initZ@Base 12
+ _D3std8datetime4date6maxDayFNaNbNiNfiiZh@Base 12
+ _D3std8datetime4date8DateTime10diffMonthsMxFNaNbNiNfSQBzQByQBsQBqZi@Base 12
+ _D3std8datetime4date8DateTime10endOfMonthMxFNaNbNdNfZSQCaQBzQBtQBr@Base 12
+ _D3std8datetime4date8DateTime10isLeapYearMxFNaNbNdNiNfZb@Base 12
+ _D3std8datetime4date8DateTime11_addSecondsMFNaNbNcNiNjNflZSQCfQCeQByQBw@Base 12
+ _D3std8datetime4date8DateTime11daysInMonthMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date8DateTime11isoWeekYearMxFNaNbNdNfZs@Base 12
+ _D3std8datetime4date8DateTime11toISOStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date8DateTime12modJulianDayMxFNaNbNdNiNfZl@Base 12
+ _D3std8datetime4date8DateTime14toISOExtStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date8DateTime14toSimpleStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date8DateTime17dayOfGregorianCalMFNaNbNdNiNfiZv@Base 12
+ _D3std8datetime4date8DateTime17dayOfGregorianCalMxFNaNbNdNiNfZi@Base 12
+ _D3std8datetime4date8DateTime3dayMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime3dayMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date8DateTime3maxFNaNbNdNiNfZSQBsQBrQBlQBj@Base 12
+ _D3std8datetime4date8DateTime3minFNaNbNdNiNfZSQBsQBrQBlQBj@Base 12
+ _D3std8datetime4date8DateTime4hourMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime4hourMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date8DateTime4isADMxFNaNbNdNiNfZb@Base 12
+ _D3std8datetime4date8DateTime4yearMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime4yearMxFNaNbNdNiNfZs@Base 12
+ _D3std8datetime4date8DateTime5monthMFNaNdNfEQBqQBpQBj5MonthZv@Base 12
+ _D3std8datetime4date8DateTime5monthMxFNaNbNdNiNfZEQBwQBvQBp5Month@Base 12
+ _D3std8datetime4date8DateTime5opCmpMxFNaNbNiNfSQBtQBsQBmQBkZi@Base 12
+ _D3std8datetime4date8DateTime6__ctorMFNaNbNcNiNfSQBvQBuQBo4DateSQCkQCjQCd9TimeOfDayZSQDfQDeQCyQCw@Base 12
+ _D3std8datetime4date8DateTime6__ctorMFNaNcNfiiiiiiZSQByQBxQBrQBp@Base 12
+ _D3std8datetime4date8DateTime6__initZ@Base 12
+ _D3std8datetime4date8DateTime6minuteMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime6minuteMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date8DateTime6secondMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime6secondMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date8DateTime6yearBCMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime6yearBCMxFNaNdNfZs@Base 12
+ _D3std8datetime4date8DateTime7isoWeekMxFNaNbNdNfZh@Base 12
+ _D3std8datetime4date8DateTime8__xopCmpMxFKxSQBqQBpQBjQBhZi@Base 12
+ _D3std8datetime4date8DateTime8toStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date8DateTime9dayOfWeekMxFNaNbNdNiNfZEQCaQBzQBt9DayOfWeek@Base 12
+ _D3std8datetime4date8DateTime9dayOfYearMFNaNdNfiZv@Base 12
+ _D3std8datetime4date8DateTime9dayOfYearMxFNaNbNdNiNfZt@Base 12
+ _D3std8datetime4date8DateTime9julianDayMxFNaNbNdNiNfZl@Base 12
+ _D3std8datetime4date8DateTime9timeOfDayMFNaNbNdNiNfSQByQBxQBr9TimeOfDayZv@Base 12
+ _D3std8datetime4date8DateTime9timeOfDayMxFNaNbNdNiNfZSQCaQBzQBt9TimeOfDay@Base 12
+ _D3std8datetime4date8DateTimeQoMFNaNbNdNiNfSQBqQBpQBj4DateZv@Base 12
+ _D3std8datetime4date8DateTimeQoMxFNaNbNdNiNfZSQBsQBrQBl4Date@Base 12
+ _D3std8datetime4date8DateTime__T11toISOStringTSQBt5array__T8AppenderTAyaZQoZQBsMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date8DateTime__T14toISOExtStringTSQBw5array__T8AppenderTAyaZQoZQBvMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date8DateTime__T14toSimpleStringTSQBw5array__T8AppenderTAyaZQoZQBvMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date9TimeOfDay11__invariantMxFNaNfZv@Base 12
+ _D3std8datetime4date9TimeOfDay11_addSecondsMFNaNbNcNiNjNflZSQCgQCfQBzQBx@Base 12
+ _D3std8datetime4date9TimeOfDay11toISOStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date9TimeOfDay12__invariant0MxFNaNfZv@Base 12
+ _D3std8datetime4date9TimeOfDay14toISOExtStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date9TimeOfDay3maxFNaNbNdNiNfZSQBtQBsQBmQBk@Base 12
+ _D3std8datetime4date9TimeOfDay3minFNaNbNdNiNfZSQBtQBsQBmQBk@Base 12
+ _D3std8datetime4date9TimeOfDay4hourMFNaNdNfiZv@Base 12
+ _D3std8datetime4date9TimeOfDay4hourMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date9TimeOfDay5opCmpMxFNaNbNiNfSQBuQBtQBnQBlZi@Base 12
+ _D3std8datetime4date9TimeOfDay6__ctorMFNaNcNfiiiZSQBwQBvQBpQBn@Base 12
+ _D3std8datetime4date9TimeOfDay6__initZ@Base 12
+ _D3std8datetime4date9TimeOfDay6_validFNaNbNiNfiiiZb@Base 12
+ _D3std8datetime4date9TimeOfDay6minuteMFNaNdNfiZv@Base 12
+ _D3std8datetime4date9TimeOfDay6minuteMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date9TimeOfDay6secondMFNaNdNfiZv@Base 12
+ _D3std8datetime4date9TimeOfDay6secondMxFNaNbNdNiNfZh@Base 12
+ _D3std8datetime4date9TimeOfDay8__xopCmpMxFKxSQBrQBqQBkQBiZi@Base 12
+ _D3std8datetime4date9TimeOfDay8toStringMxFNaNbNfZAya@Base 12
+ _D3std8datetime4date9TimeOfDay__T11toISOStringTSQBu5array__T8AppenderTAyaZQoZQBsMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date9TimeOfDay__T14toISOExtStringTSQBx5array__T8AppenderTAyaZQoZQBvMxFNaNfKQBpZv@Base 12
+ _D3std8datetime4date9TimeOfDay__T8opBinaryVAyaa1_2dZQtMxFNaNbNiNfSQCmQClQCfQCdZS4core4time8Duration@Base 12
+ _D3std8datetime4date__T12enforceValidVAyaa4_64617973ZQBeFNaNfiEQCjQCiQCc5MonthiQBpmZv@Base 12
+ _D3std8datetime4date__T12enforceValidVAyaa5_686f757273ZQBgFNaNfiQBamZv@Base 12
+ _D3std8datetime4date__T12enforceValidVAyaa6_6d6f6e746873ZQBiFNaNfiQBcmZv@Base 12
+ _D3std8datetime4date__T12enforceValidVAyaa7_6d696e75746573ZQBkFNaNfiQBemZv@Base 12
+ _D3std8datetime4date__T12enforceValidVAyaa7_7365636f6e6473ZQBkFNaNfiQBemZv@Base 12
+ _D3std8datetime4date__T20splitUnitsFromHNSecsVAyaa4_64617973ZQBmFNaNbNiNfKlZl@Base 12
+ _D3std8datetime4date__T20splitUnitsFromHNSecsVAyaa5_686f757273ZQBoFNaNbNiNfKlZl@Base 12
+ _D3std8datetime4date__T20splitUnitsFromHNSecsVAyaa7_6d696e75746573ZQBsFNaNbNiNfKlZl@Base 12
+ _D3std8datetime4date__T20splitUnitsFromHNSecsVAyaa7_7365636f6e6473ZQBsFNaNbNiNfKlZl@Base 12
+ _D3std8datetime4date__T5validVAyaa4_64617973ZQwFNaNbNiNfiiiZb@Base 12
+ _D3std8datetime4date__T5validVAyaa5_686f757273ZQyFNaNbNiNfiZb@Base 12
+ _D3std8datetime4date__T5validVAyaa6_6d6f6e746873ZQBaFNaNbNiNfiZb@Base 12
+ _D3std8datetime4date__T5validVAyaa7_6d696e75746573ZQBcFNaNbNiNfiZb@Base 12
+ _D3std8datetime4date__T5validVAyaa7_7365636f6e6473ZQBcFNaNbNiNfiZb@Base 12
+ _D3std8datetime7systime11__moduleRefZ@Base 12
+ _D3std8datetime7systime12__ModuleInfoZ@Base 12
+ _D3std8datetime7systime17unixTimeToStdTimeFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime19fracSecsToISOStringFNaNbNfiiZAya@Base 12
+ _D3std8datetime7systime20DosFileTimeToSysTimeFNfkyCQBxQBw8timezone8TimeZoneZSQCxQCwQCq7SysTime@Base 12
+ _D3std8datetime7systime20SysTimeToDosFileTimeFNfMSQBwQBvQBp7SysTimeZk@Base 12
+ _D3std8datetime7systime5Clock6__initZ@Base 12
+ _D3std8datetime7systime5Clock6__vtblZ@Base 12
+ _D3std8datetime7systime5Clock7__ClassZ@Base 12
+ _D3std8datetime7systime5Clock__T11currStdTimeVE4core4time9ClockTypei0ZQBmFNbNdNiNeZl@Base 12
+ _D3std8datetime7systime5Clock__T8currTimeVE4core4time9ClockTypei0ZQBiFNbNfyCQCwQCv8timezone8TimeZoneZSQDwQDvQDp7SysTime@Base 12
+ _D3std8datetime7systime7SysTime10diffMonthsMxFNbNlNfMSQCaQBzQBtQBoZi@Base 12
+ _D3std8datetime7systime7SysTime10endOfMonthMxFNbNdNjNfZSQCcQCbQBvQBq@Base 12
+ _D3std8datetime7systime7SysTime10isLeapYearMxFNbNdNlNfZb@Base 12
+ _D3std8datetime7systime7SysTime10toTimeSpecMxFNaNbNlNfZS4core3sys5posix6signal8timespec@Base 12
+ _D3std8datetime7systime7SysTime11__xopEqualsMxFKxSQBwQBvQBpQBkZb@Base 12
+ _D3std8datetime7systime7SysTime11daysInMonthMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime11dstInEffectMxFNbNdNlNfZb@Base 12
+ _D3std8datetime7systime7SysTime11toISOStringMxFNbNlNfZAya@Base 12
+ _D3std8datetime7systime7SysTime11toLocalTimeMxFNaNbNlNfZSQCdQCcQBwQBr@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone11dstInEffectMxFNbNiNlNflZb@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone11utcOffsetAtMxFNbNiNlNflZS4core4time8Duration@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone13_initTimeZoneyCQCiQChQCbQBwQBr@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone6__ctorMyFNaNfZyCQCiQChQCbQBwQBr@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone6__initZ@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone6__vtblZ@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone6hasDSTMxFNbNdNiNfZb@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone6opCallFNaNbNiNfZyCQCkQCjQCdQByQBt@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone7__ClassZ@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone7tzToUTCMxFNbNiNlNflZl@Base 12
+ _D3std8datetime7systime7SysTime12InitTimeZone7utcToTZMxFNbNiNlNflZl@Base 12
+ _D3std8datetime7systime7SysTime12fromUnixTimeFNaNbNflyCQCbQCa8timezone8TimeZoneZSQDbQDaQCuQCp@Base 12
+ _D3std8datetime7systime7SysTime12modJulianDayMxFNbNdNlNfZl@Base 12
+ _D3std8datetime7systime7SysTime14toISOExtStringMxFNbNlNfiZAya@Base 12
+ _D3std8datetime7systime7SysTime14toSimpleStringMxFNbNlNfZAya@Base 12
+ _D3std8datetime7systime7SysTime17dayOfGregorianCalMFNbNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime17dayOfGregorianCalMxFNbNdNlNfZi@Base 12
+ _D3std8datetime7systime7SysTime3dayMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime3dayMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime3maxFNaNbNdNfZSQBsQBrQBlQBg@Base 12
+ _D3std8datetime7systime7SysTime3minFNaNbNdNfZSQBsQBrQBlQBg@Base 12
+ _D3std8datetime7systime7SysTime4hourMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime4hourMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime4isADMxFNbNdNlNfZb@Base 12
+ _D3std8datetime7systime7SysTime4toTMMxFNbNlNfZS4core3sys5posix4stdc4time2tm@Base 12
+ _D3std8datetime7systime7SysTime4yearMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime4yearMxFNbNdNlNfZs@Base 12
+ _D3std8datetime7systime7SysTime5monthMFNdNlNfEQBsQBr4date5MonthZv@Base 12
+ _D3std8datetime7systime7SysTime5monthMxFNbNdNlNfZEQBwQBv4date5Month@Base 12
+ _D3std8datetime7systime7SysTime5toUTCMxFNaNbNlNfZSQBwQBvQBpQBk@Base 12
+ _D3std8datetime7systime7SysTime6__ctorMFNaNbNcNjNflNkMyCQCcQCb8timezone8TimeZoneZSQDcQDbQCvQCq@Base 12
+ _D3std8datetime7systime7SysTime6__ctorMFNbNcNjNfSQBvQBu4date4DateNkMyCQCqQCp8timezone8TimeZoneZSQDqQDpQDjQDe@Base 12
+ _D3std8datetime7systime7SysTime6__ctorMFNbNcNjNfSQBvQBu4date8DateTimeNkMyCQCuQCt8timezone8TimeZoneZSQDuQDtQDnQDi@Base 12
+ _D3std8datetime7systime7SysTime6__ctorMFNcNjNfSQBtQBs4date8DateTimeS4core4time8DurationNkMyCQDmQDl8timezone8TimeZoneZSQEmQElQEfQEa@Base 12
+ _D3std8datetime7systime7SysTime6__initZ@Base 12
+ _D3std8datetime7systime7SysTime6minuteMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime6minuteMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime6secondMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime6secondMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime6toHashMxFNaNbNiNlNfZm@Base 12
+ _D3std8datetime7systime7SysTime6yearBCMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime6yearBCMxFNdNlNfZt@Base 12
+ _D3std8datetime7systime7SysTime7adjTimeMFNbNdNlNflZv@Base 12
+ _D3std8datetime7systime7SysTime7adjTimeMxFNbNdNlNfZl@Base 12
+ _D3std8datetime7systime7SysTime7isoWeekMxFNbNdNlNfZh@Base 12
+ _D3std8datetime7systime7SysTime7stdTimeMFNaNbNdNlNflZv@Base 12
+ _D3std8datetime7systime7SysTime7stdTimeMxFNaNbNdNiNlNfZl@Base 12
+ _D3std8datetime7systime7SysTime8__xopCmpMxFKxSQBsQBrQBlQBgZi@Base 12
+ _D3std8datetime7systime7SysTime8fracSecsMFNdNlNfS4core4time8DurationZv@Base 12
+ _D3std8datetime7systime7SysTime8fracSecsMxFNbNdNlNfZS4core4time8Duration@Base 12
+ _D3std8datetime7systime7SysTime8timezoneMFNaNbNdNlNfyCQCaQBzQBd8TimeZoneZv@Base 12
+ _D3std8datetime7systime7SysTime8timezoneMxFNaNbNdNjNfZyCQCcQCbQBf8TimeZone@Base 12
+ _D3std8datetime7systime7SysTime8toStringMxFNbNlNfZAya@Base 12
+ _D3std8datetime7systime7SysTime9_timezoneMFNaNbNdNiNlNfNkMyCQCgQCf8timezone8TimeZoneZv@Base 12
+ _D3std8datetime7systime7SysTime9_timezoneMxFNaNbNdNiNfZyCQCdQCc8timezone8TimeZone@Base 12
+ _D3std8datetime7systime7SysTime9dayOfWeekMxFNbNdNlNfZEQCaQBz4date9DayOfWeek@Base 12
+ _D3std8datetime7systime7SysTime9dayOfYearMFNdNlNfiZv@Base 12
+ _D3std8datetime7systime7SysTime9dayOfYearMxFNbNdNlNfZt@Base 12
+ _D3std8datetime7systime7SysTime9julianDayMxFNbNdNlNfZl@Base 12
+ _D3std8datetime7systime7SysTime9toOtherTZMxFNaNbNlNfyCQCaQBz8timezone8TimeZoneZSQDaQCzQCtQCo@Base 12
+ _D3std8datetime7systime7SysTime9toTimeValMxFNaNbNlNfZS4core3sys5posixQk4time7timeval@Base 12
+ _D3std8datetime7systime7SysTime9utcOffsetMxFNbNdNlNfZS4core4time8Duration@Base 12
+ _D3std8datetime7systime7SysTime__T10toUnixTimeTlZQpMxFNaNbNiNlNfZl@Base 12
+ _D3std8datetime7systime7SysTime__T11toISOStringTSQBv5array__T8AppenderTAyaZQoZQBsMxFNlNfKQBpZv@Base 12
+ _D3std8datetime7systime7SysTime__T14toISOExtStringTSQBy5array__T8AppenderTAyaZQoZQBvMxFNlNfKQBpiZv@Base 12
+ _D3std8datetime7systime7SysTime__T14toSimpleStringTSQBy5array__T8AppenderTAyaZQoZQBvMxFNlNfKQBpZv@Base 12
+ _D3std8datetime7systime7SysTime__T5opCmpZQhMxFNaNbNiNlNfKxSQCfQCeQByQBtZi@Base 12
+ _D3std8datetime7systime7SysTime__T6opCastTSQBpQBo4date4DateZQBaMxFNbNlNfZQBf@Base 12
+ _D3std8datetime7systime7SysTime__T6opCastTSQBpQBo4date8DateTimeZQBeMxFNbNlNfZQBj@Base 12
+ _D3std8datetime7systime7SysTime__T8opAssignZQkMFNaNbNcNiNjNfKxSQCjQCiQCcQBxZSQCxQCwQCqQCl@Base 12
+ _D3std8datetime7systime7SysTime__T8opEqualsZQkMxFNaNbNiNlNfKxSQCiQChQCbQBwZb@Base 12
+ _D3std8datetime7systime__T17stdTimeToUnixTimeTlZQwFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T18getUnitsFromHNSecsVAyaa4_64617973ZQBkFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T18getUnitsFromHNSecsVAyaa5_686f757273ZQBmFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T18getUnitsFromHNSecsVAyaa7_6d696e75746573ZQBqFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T18getUnitsFromHNSecsVAyaa7_7365636f6e6473ZQBqFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T19fracSecsToISOStringTSQBv5array__T8AppenderTAyaZQoZQCaFNaNbNfKQBpiiZv@Base 12
+ _D3std8datetime7systime__T21removeUnitsFromHNSecsVAyaa4_64617973ZQBnFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T21removeUnitsFromHNSecsVAyaa5_686f757273ZQBpFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T21removeUnitsFromHNSecsVAyaa7_6d696e75746573ZQBtFNaNbNiNflZl@Base 12
+ _D3std8datetime7systime__T21removeUnitsFromHNSecsVAyaa7_7365636f6e6473ZQBtFNaNbNiNflZl@Base 12
+ _D3std8datetime8interval11__moduleRefZ@Base 12
+ _D3std8datetime8interval12__ModuleInfoZ@Base 12
+ _D3std8datetime8timezone11__moduleRefZ@Base 12
+ _D3std8datetime8timezone11setTZEnvVarFNbNeAyaZv@Base 12
+ _D3std8datetime8timezone12__ModuleInfoZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10LeapSecond6__ctorMFNaNcNfliZSQCqQCpQCjQCdQBr@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10TempTTInfo6__ctorMFNaNcNfibhZSQCrQCqQCkQCeQBs@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10TempTTInfo6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10Transition6__ctorMFNaNcNflPySQCqQCpQCjQCd6TTInfoZSQDlQDkQDeQCyQCm@Base 12
+ _D3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone11dstInEffectMxFNbNlNflZb@Base 12
+ _D3std8datetime8timezone13PosixTimeZone11getTimeZoneFNeAyaQdZyCQCjQCiQCcQBw@Base 12
+ _D3std8datetime8timezone13PosixTimeZone14TempTransition6__ctorMFNaNcNflPySQCuQCtQCnQCh6TTInfoPSQDpQDoQDiQDc14TransitionTypeZSQEtQEsQEmQEgQDu@Base 12
+ _D3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone14TransitionType6__ctorMFNaNcNfbbZSQCuQCtQCnQChQBv@Base 12
+ _D3std8datetime8timezone13PosixTimeZone14TransitionType6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone19_enforceValidTZFileFNaNfbmZv@Base 12
+ _D3std8datetime8timezone13PosixTimeZone19getInstalledTZNamesFNfAyaQdZAQh@Base 12
+ _D3std8datetime8timezone13PosixTimeZone20calculateLeapSecondsMxFNaNbNlNflZi@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6TTInfo11__xopEqualsMxFKxSQClQCkQCeQByQBmZb@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6TTInfo6__ctorMyFNaNcNfxSQCkQCjQCdQBx10TempTTInfoAyaZySQDoQDnQDhQDbQCp@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6TTInfo6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6TTInfo9__xtoHashFNbNeKxSQCkQCjQCdQBxQBlZm@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6__ctorMyFNaNfyASQCcQCbQBvQBp10TransitionyASQDdQDcQCwQCq10LeapSecondAyaQdQfbZyCQEmQElQEfQDz@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6__initZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6__vtblZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone6hasDSTMxFNbNdNfZb@Base 12
+ _D3std8datetime8timezone13PosixTimeZone7__ClassZ@Base 12
+ _D3std8datetime8timezone13PosixTimeZone7tzToUTCMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone13PosixTimeZone7utcToTZMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTAaZQmFNeKSQCh5stdio4FilemZQBa@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTAhZQmFNeKSQCh5stdio4FilemZQBa@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTSQByQBxQBrQBl10TempTTInfoZQBjFNfKSQDf5stdio4FileZQBx@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTaZQlFNeKSQCg5stdio4FileZa@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTbZQlFNeKSQCg5stdio4FileZb@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValThZQlFNeKSQCg5stdio4FileZh@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTiZQlFNeKSQCg5stdio4FileZi@Base 12
+ _D3std8datetime8timezone13PosixTimeZone__T7readValTlZQlFNeKSQCg5stdio4FileZl@Base 12
+ _D3std8datetime8timezone13TZConversions11__xopEqualsMxFKxSQCeQCdQBxQBrZb@Base 12
+ _D3std8datetime8timezone13TZConversions6__initZ@Base 12
+ _D3std8datetime8timezone13TZConversions9__xtoHashFNbNeKxSQCdQCcQBwQBqZm@Base 12
+ _D3std8datetime8timezone13clearTZEnvVarFNbNeZv@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone11dstInEffectMxFNbNlNflZb@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone11toISOStringFNaNfS4core4time8DurationZAya@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone11utcOffsetAtMxFNbNlNflZS4core4time8Duration@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone14toISOExtStringFNaNfS4core4time8DurationZAya@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone6__ctorMyFNaNfS4core4time8DurationAyaZyCQDaQCzQCtQCn@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone6__initZ@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone6__vtblZ@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone6hasDSTMxFNbNdNfZb@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone7__ClassZ@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone7tzToUTCMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone7utcToTZMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone9utcOffsetMxFNaNbNdNfZS4core4time8Duration@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone__T11toISOStringTSQCe5array__T8AppenderTAyaZQoZQBsFNaNfKQBnS4core4time8DurationZv@Base 12
+ _D3std8datetime8timezone14SimpleTimeZone__T14toISOExtStringTSQCh5array__T8AppenderTAyaZQoZQBvFNaNfKQBnS4core4time8DurationZv@Base 12
+ _D3std8datetime8timezone18parseTZConversionsFNaNfAyaZSQCaQBzQBt13TZConversions@Base 12
+ _D3std8datetime8timezone3UTC11dstInEffectMxFNbNlNflZb@Base 12
+ _D3std8datetime8timezone3UTC11utcOffsetAtMxFNbNlNflZS4core4time8Duration@Base 12
+ _D3std8datetime8timezone3UTC4_utcyCQBhQBgQBaQu@Base 12
+ _D3std8datetime8timezone3UTC6__ctorMyFNaNfZyCQBrQBqQBkQBe@Base 12
+ _D3std8datetime8timezone3UTC6__initZ@Base 12
+ _D3std8datetime8timezone3UTC6__vtblZ@Base 12
+ _D3std8datetime8timezone3UTC6hasDSTMxFNbNdNfZb@Base 12
+ _D3std8datetime8timezone3UTC6opCallFNaNbNfZyCQBrQBqQBkQBe@Base 12
+ _D3std8datetime8timezone3UTC7__ClassZ@Base 12
+ _D3std8datetime8timezone3UTC7tzToUTCMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone3UTC7utcToTZMxFNbNlNflZl@Base 12
+ _D3std8datetime8timezone8TimeZone11_getOldNameFNaNbNfAyaZQe@Base 12
+ _D3std8datetime8timezone8TimeZone11utcOffsetAtMxFNbNlNflZS4core4time8Duration@Base 12
+ _D3std8datetime8timezone8TimeZone4nameMxFNbNdNfZAya@Base 12
+ _D3std8datetime8timezone8TimeZone6__ctorMyFNaNfAyaQdQfZyCQCdQCcQBwQBq@Base 12
+ _D3std8datetime8timezone8TimeZone6__initZ@Base 12
+ _D3std8datetime8timezone8TimeZone6__vtblZ@Base 12
+ _D3std8datetime8timezone8TimeZone7__ClassZ@Base 12
+ _D3std8datetime8timezone8TimeZone7dstNameMxFNbNdNlNfZAya@Base 12
+ _D3std8datetime8timezone8TimeZone7stdNameMxFNbNdNlNfZAya@Base 12
+ _D3std8datetime8timezone9LocalTime11dstInEffectMxFNbNlNelZb@Base 12
+ _D3std8datetime8timezone9LocalTime6__ctorMyFNaNfZyCQBxQBwQBqQBk@Base 12
+ _D3std8datetime8timezone9LocalTime6__initZ@Base 12
+ _D3std8datetime8timezone9LocalTime6__vtblZ@Base 12
+ _D3std8datetime8timezone9LocalTime6hasDSTMxFNbNdNeZb@Base 12
+ _D3std8datetime8timezone9LocalTime6opCallFNaNbNeZyCQBxQBwQBqQBk@Base 12
+ _D3std8datetime8timezone9LocalTime7__ClassZ@Base 12
+ _D3std8datetime8timezone9LocalTime7dstNameMxFNbNdNlNeZAya@Base 12
+ _D3std8datetime8timezone9LocalTime7stdNameMxFNbNdNlNeZAya@Base 12
+ _D3std8datetime8timezone9LocalTime7tzToUTCMxFNbNlNelZl@Base 12
+ _D3std8datetime8timezone9LocalTime7utcToTZMxFNbNlNelZl@Base 12
+ _D3std8datetime8timezone9LocalTime9singletonFNeZ5guardOb@Base 12
+ _D3std8datetime8timezone9LocalTime9singletonFNeZ8instanceyCQCfQCeQByQBs@Base 12
+ _D3std8datetime8timezone9LocalTime9singletonFNeZ9__lambda3FNbNiNfZb@Base 12
+ _D3std8datetime8timezone9LocalTime9singletonFNeZyCQBwQBvQBpQBj@Base 12
+ _D3std8datetime9stopwatch11__moduleRefZ@Base 12
+ _D3std8datetime9stopwatch12__ModuleInfoZ@Base 12
+ _D3std8datetime9stopwatch9StopWatch14setTimeElapsedMFNbNiNfS4core4time8DurationZv@Base 12
+ _D3std8datetime9stopwatch9StopWatch4peekMxFNbNiNfZS4core4time8Duration@Base 12
+ _D3std8datetime9stopwatch9StopWatch4stopMFNbNiNfZv@Base 12
+ _D3std8datetime9stopwatch9StopWatch5resetMFNbNiNfZv@Base 12
+ _D3std8datetime9stopwatch9StopWatch5startMFNbNiNfZv@Base 12
+ _D3std8datetime9stopwatch9StopWatch6__ctorMFNbNcNiNfEQBz8typecons__T4FlagVAyaa9_6175746f5374617274ZQBfZSQDyQDxQDrQDk@Base 12
+ _D3std8datetime9stopwatch9StopWatch6__initZ@Base 12
+ _D3std8datetime9stopwatch9StopWatch7runningMxFNaNbNdNiNfZb@Base 12
+ _D3std8demangle11__moduleRefZ@Base 12
+ _D3std8demangle12__ModuleInfoZ@Base 12
+ _D3std8demangleQjFNaNbNfAyaZQe@Base 12
+ _D3std8encoding11__moduleRefZ@Base 12
+ _D3std8encoding12__ModuleInfoZ@Base 12
+ _D3std8encoding14EncodingScheme18supportedFactoriesHAyaQd@Base 12
+ _D3std8encoding14EncodingScheme6__initZ@Base 12
+ _D3std8encoding14EncodingScheme6__vtblZ@Base 12
+ _D3std8encoding14EncodingScheme6createFAyaZ11initializedOb@Base 12
+ _D3std8encoding14EncodingScheme6createFAyaZ24registerDefaultEncodingsFZb@Base 12
+ _D3std8encoding14EncodingScheme6createFAyaZCQBqQBpQBj@Base 12
+ _D3std8encoding14EncodingScheme7__ClassZ@Base 12
+ _D3std8encoding14EncodingScheme7isValidMFAxhZb@Base 12
+ _D3std8encoding14EncodingScheme8registerFAyaZv@Base 12
+ _D3std8encoding14EncodingScheme9supportedHAyaPFZCQBvQBuQBo@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr18EncodingSchemeUtf8ZQBnFZ9__lambda5FNaNbNfZCQDrQDqQDk@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr18EncodingSchemeUtf8ZQBnFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr19EncodingSchemeASCIIZQBoFZ9__lambda5FNaNbNfZCQDsQDrQDl@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr19EncodingSchemeASCIIZQBoFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr20EncodingSchemeLatin1ZQBpFZ9__lambda5FNaNbNfZCQDtQDsQDm@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr20EncodingSchemeLatin1ZQBpFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr20EncodingSchemeLatin2ZQBpFZ9__lambda5FNaNbNfZCQDtQDsQDm@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr20EncodingSchemeLatin2ZQBpFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeUtf16NativeZQBuFZ9__lambda5FNaNbNfZCQDyQDxQDr@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeUtf16NativeZQBuFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeUtf32NativeZQBuFZ9__lambda5FNaNbNfZCQDyQDxQDr@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeUtf32NativeZQBuFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1250ZQBuFZ9__lambda5FNaNbNfZCQDyQDxQDr@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1250ZQBuFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1251ZQBuFZ9__lambda5FNaNbNfZCQDyQDxQDr@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1251ZQBuFZv@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1252ZQBuFZ9__lambda5FNaNbNfZCQDyQDxQDr@Base 12
+ _D3std8encoding14EncodingScheme__T8registerHTCQBsQBr25EncodingSchemeWindows1252ZQBuFZv@Base 12
+ _D3std8encoding16isValidCodePointFNaNbNiNfwZb@Base 12
+ _D3std8encoding17EncodingException6__ctorMFNaNfAyaZCQByQBxQBr@Base 12
+ _D3std8encoding17EncodingException6__initZ@Base 12
+ _D3std8encoding17EncodingException6__vtblZ@Base 12
+ _D3std8encoding17EncodingException7__ClassZ@Base 12
+ _D3std8encoding18EncodingSchemeUtf810safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding18EncodingSchemeUtf813encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding18EncodingSchemeUtf819replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding18EncodingSchemeUtf85namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding18EncodingSchemeUtf86__initZ@Base 12
+ _D3std8encoding18EncodingSchemeUtf86__vtblZ@Base 12
+ _D3std8encoding18EncodingSchemeUtf86decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding18EncodingSchemeUtf86encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding18EncodingSchemeUtf87__ClassZ@Base 12
+ _D3std8encoding18EncodingSchemeUtf88toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding18EncodingSchemeUtf89canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding19EncodingSchemeASCII10safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding19EncodingSchemeASCII13encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding19EncodingSchemeASCII19replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding19EncodingSchemeASCII5namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding19EncodingSchemeASCII6__initZ@Base 12
+ _D3std8encoding19EncodingSchemeASCII6__vtblZ@Base 12
+ _D3std8encoding19EncodingSchemeASCII6decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding19EncodingSchemeASCII6encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding19EncodingSchemeASCII7__ClassZ@Base 12
+ _D3std8encoding19EncodingSchemeASCII8toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding19EncodingSchemeASCII9canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding20EncodingSchemeLatin110safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding20EncodingSchemeLatin113encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding20EncodingSchemeLatin119replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding20EncodingSchemeLatin15namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding20EncodingSchemeLatin16__initZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin16__vtblZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin16decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding20EncodingSchemeLatin16encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding20EncodingSchemeLatin17__ClassZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin18toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding20EncodingSchemeLatin19canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding20EncodingSchemeLatin210safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding20EncodingSchemeLatin213encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding20EncodingSchemeLatin219replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding20EncodingSchemeLatin25namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding20EncodingSchemeLatin26__initZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin26__vtblZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin26decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding20EncodingSchemeLatin26encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding20EncodingSchemeLatin27__ClassZ@Base 12
+ _D3std8encoding20EncodingSchemeLatin28toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding20EncodingSchemeLatin29canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native10safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native13encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native19replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native5namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native6__initZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native6__vtblZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native6decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native6encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native7__ClassZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native8toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding25EncodingSchemeUtf16Native9canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native10safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native13encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native19replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native5namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native6__initZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native6__vtblZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native6decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native6encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native7__ClassZ@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native8toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding25EncodingSchemeUtf32Native9canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding25EncodingSchemeWindows125010safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows125013encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows125019replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding25EncodingSchemeWindows12505namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12506__initZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12506__vtblZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12506decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows12506encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows12507__ClassZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12508toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12509canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding25EncodingSchemeWindows125110safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows125113encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows125119replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding25EncodingSchemeWindows12515namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12516__initZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12516__vtblZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12516decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows12516encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows12517__ClassZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12518toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12519canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding25EncodingSchemeWindows125210safeDecodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows125213encodedLengthMxFNaNbNiNfwZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows125219replacementSequenceMxFNaNbNdNiNfZAyh@Base 12
+ _D3std8encoding25EncodingSchemeWindows12525namesMxFNaNbNfZAAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12526__initZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12526__vtblZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12526decodeMxFNaNbNiNfKAxhZw@Base 12
+ _D3std8encoding25EncodingSchemeWindows12526encodeMxFNaNbNiNfwAhZm@Base 12
+ _D3std8encoding25EncodingSchemeWindows12527__ClassZ@Base 12
+ _D3std8encoding25EncodingSchemeWindows12528toStringMxFNaNbNiNfZAya@Base 12
+ _D3std8encoding25EncodingSchemeWindows12529canEncodeMxFNaNbNiNfwZb@Base 12
+ _D3std8encoding29UnrecognizedEncodingException6__ctorMFNaNfAyaZCQCkQCjQCd@Base 12
+ _D3std8encoding29UnrecognizedEncodingException6__initZ@Base 12
+ _D3std8encoding29UnrecognizedEncodingException6__vtblZ@Base 12
+ _D3std8encoding29UnrecognizedEncodingException7__ClassZ@Base 12
+ _D3std8encoding8bomTableyASQz8typecons__T5TupleTEQBvQBu3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCl@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf10Latin1CharZQBjFNaNbNiNfKQBjZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf10Latin2CharZQBjFNaNbNiNfKQBjZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf15Windows1250CharZQBoFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf15Windows1251CharZQBoFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf15Windows1252CharZQBoFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxEQBgQBf9AsciiCharZQBhFNaNbNiNfKQBhZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxaZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxuZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T10safeDecodeTAxwZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg10Latin1CharZQBkFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg10Latin2CharZQBkFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg15Windows1250CharZQBpFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg15Windows1251CharZQBpFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg15Windows1252CharZQBpFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTEQBhQBg9AsciiCharZQBiFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTaZQsFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTuZQsFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T13encodedLengthTwZQsFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ15isValidCodeUnitFNaNbNiNfQBuZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ19replacementSequenceFNaNbNdNiNfZAyEQDnQDmQCd@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9__mixin1313decodeReverseFNaNbNiNfKAxEQDpQDoQCfZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9__mixin134skipFNaNbNiNfKAxEQDfQDeQBvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9__mixin136encodeFNaNbNfwZAQBv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9__mixin136encodeFNaNbNiNfwKAQBxZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9__mixin136encodeFwDFQBpZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin1CharZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ6bstMapyASQCn8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin1015isValidCodeUnitFNaNbNiNfQCeZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQDxQDwQCn@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxEQDrQDqQChZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin10Qk4skipFNaNbNiNfKAxEQDhQDgQBxZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin10Qk6encodeFNaNbNfwZAQBx@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin10Qk6encodeFNaNbNiNfwKAQBzZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj10Latin2CharZ9__mixin10Qk6encodeFwDFQBrZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ6bstMapyASQCs8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin1015isValidCodeUnitFNaNbNiNfQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEcQEbQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxEQDwQDvQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin10Qk4skipFNaNbNiNfKAxEQDmQDlQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin10Qk6encodeFNaNbNfwZAQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin10Qk6encodeFNaNbNiNfwKAQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1250CharZ9__mixin10Qk6encodeFwDFQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ6bstMapyASQCs8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin1015isValidCodeUnitFNaNbNiNfQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEcQEbQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxEQDwQDvQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin10Qk4skipFNaNbNiNfKAxEQDmQDlQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin10Qk6encodeFNaNbNfwZAQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin10Qk6encodeFNaNbNiNfwKAQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1251CharZ9__mixin10Qk6encodeFwDFQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ6bstMapyASQCs8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin1015isValidCodeUnitFNaNbNiNfQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEcQEbQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxEQDwQDvQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin10Qk4skipFNaNbNiNfKAxEQDmQDlQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin10Qk6encodeFNaNbNfwZAQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin10Qk6encodeFNaNbNiNfwKAQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj15Windows1252CharZ9__mixin10Qk6encodeFwDFQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ15isValidCodeUnitFNaNbNiNfQBsZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ19replacementSequenceFNaNbNdNiNfZAyEQDlQDkQCb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9__mixin1413decodeReverseFNaNbNiNfKAxEQDnQDmQCdZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9__mixin144skipFNaNbNiNfKAxEQDdQDcQBtZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9__mixin146encodeFNaNbNfwZAQBt@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9__mixin146encodeFNaNbNiNfwKAQBvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9__mixin146encodeFwDFQBnZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTEQBkQBj9AsciiCharZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ15isValidCodeUnitFNaNbNiNfaZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ19replacementSequenceFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ5tailsFNaNbNiNfaZi@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9__mixin1513decodeReverseFNaNbNiNfKAxaZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9__mixin154skipFNaNbNiNfKAxaZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9__mixin156encodeFNaNbNfwZAa@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9__mixin156encodeFNaNbNiNfwKAaZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9__mixin156encodeFwDFaZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTaZ9tailTableyG128h@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ15isValidCodeUnitFNaNbNiNfuZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ19replacementSequenceFNaNbNdNiNfZAyu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9__mixin1313decodeReverseFNaNbNiNfKAxuZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9__mixin134skipFNaNbNiNfKAxuZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9__mixin136encodeFNaNbNfwZAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9__mixin136encodeFNaNbNiNfwKAuZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9__mixin136encodeFwDFuZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTuZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ15isValidCodeUnitFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ19replacementSequenceFNaNbNdNiNfZAyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9__mixin1313decodeReverseFNaNbNiNfKAxwZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9__mixin134skipFNaNbNiNfKAxwZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9__mixin136encodeFNaNbNfwZAw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9__mixin136encodeFNaNbNiNfwKAwZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9__mixin136encodeFwDFwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTwZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ15isValidCodeUnitFNaNbNiNfEQDfQDeQBuZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ19replacementSequenceFNaNbNdNiNfZAyEQDoQDnQCd@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin1313decodeReverseFNaNbNiNfKAxQCfZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin134skipFNaNbNiNfKAxQBvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin136encodeFNaNbNfwZAEQDgQDfQBv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin136encodeFNaNbNiNfwKAEQDiQDhQBxZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin136encodeFwDFEQDaQCzQBpZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin13__T10safeDecodeTAxQBwZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9__mixin13__T6decodeTAxQBrZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin1CharZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ6bstMapyASQCo8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin1015isValidCodeUnitFNaNbNiNfEQDpQDoQCeZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQDyQDxQCn@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxQChZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk4skipFNaNbNiNfKAxQBxZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk6encodeFNaNbNfwZAEQDiQDhQBx@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk6encodeFNaNbNiNfwKAEQDkQDjQBzZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk6encodeFwDFEQDcQDbQBrZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk__T10safeDecodeTAxQByZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk10Latin2CharZ9__mixin10Qk__T6decodeTAxQBtZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ6bstMapyASQCt8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin1015isValidCodeUnitFNaNbNiNfEQDuQDtQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEdQEcQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk4skipFNaNbNiNfKAxQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk6encodeFNaNbNfwZAEQDnQDmQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk6encodeFNaNbNiNfwKAEQDpQDoQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk6encodeFwDFEQDhQDgQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk__T10safeDecodeTAxQCdZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1250CharZ9__mixin10Qk__T6decodeTAxQByZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ6bstMapyASQCt8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin1015isValidCodeUnitFNaNbNiNfEQDuQDtQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEdQEcQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk4skipFNaNbNiNfKAxQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk6encodeFNaNbNfwZAEQDnQDmQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk6encodeFNaNbNiNfwKAEQDpQDoQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk6encodeFwDFEQDhQDgQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk__T10safeDecodeTAxQCdZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1251CharZ9__mixin10Qk__T6decodeTAxQByZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ12m_charMapEndyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ14m_charMapStartyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ6bstMapyASQCt8typecons__T5TupleTuTaZQl@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ7charMapyAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin1013encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin1015isValidCodeUnitFNaNbNiNfEQDuQDtQCjZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin1019replacementSequenceFNaNbNdNiNfZAyEQEdQEcQCs@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin109canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk13decodeReverseFNaNbNiNfKAxQCmZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk4skipFNaNbNiNfKAxQCcZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk6encodeFNaNbNfwZAEQDnQDmQCc@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk6encodeFNaNbNiNfwKAEQDpQDoQCeZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk6encodeFwDFEQDhQDgQBwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk__T10safeDecodeTAxQCdZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk15Windows1252CharZ9__mixin10Qk__T6decodeTAxQByZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ15isValidCodeUnitFNaNbNiNfEQDdQDcQBsZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ19replacementSequenceFNaNbNdNiNfZAyEQDmQDlQCb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin1413decodeReverseFNaNbNiNfKAxQCdZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin144skipFNaNbNiNfKAxQBtZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin146encodeFNaNbNfwZAEQDeQDdQBt@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin146encodeFNaNbNiNfwKAEQDgQDfQBvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin146encodeFwDFEQCyQCxQBnZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin14__T10safeDecodeTAxQBuZQtFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9__mixin14__T6decodeTAxQBpZQoFNaNbNiNfKQsZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxEQBlQBk9AsciiCharZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ15isValidCodeUnitFNaNbNiNfaZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ19replacementSequenceFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ5tailsFNaNbNiNfaZi@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin1513decodeReverseFNaNbNiNfKAxaZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin154skipFNaNbNiNfKAxaZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin156encodeFNaNbNfwZAa@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin156encodeFNaNbNiNfwKAaZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin156encodeFwDFaZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin15__T10safeDecodeTAxaZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9__mixin15__T6decodeTAxaZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxaZ9tailTableyG128h@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ15isValidCodeUnitFNaNbNiNfuZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ19replacementSequenceFNaNbNdNiNfZAyu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin1313decodeReverseFNaNbNiNfKAxuZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin134skipFNaNbNiNfKAxuZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin136encodeFNaNbNfwZAu@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin136encodeFNaNbNiNfwKAuZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin136encodeFwDFuZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin13__T10safeDecodeTAxuZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9__mixin13__T6decodeTAxuZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxuZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ12encodingNameFNaNbNdNiNfZAya@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ13encodedLengthFNaNbNiNfwZm@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ15isValidCodeUnitFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ19replacementSequenceFNaNbNdNiNfZAyw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin1313decodeReverseFNaNbNiNfKAxwZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin134skipFNaNbNiNfKAxwZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin136encodeFNaNbNfwZAw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin136encodeFNaNbNiNfwKAwZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin136encodeFwDFwZvZv@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin13__T10safeDecodeTAxwZQrFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9__mixin13__T6decodeTAxwZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T15EncoderInstanceHTxwZ9canEncodeFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa10Latin1CharZQBeFNaNbNiNfKQBjZw@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa10Latin2CharZQBeFNaNbNiNfKQBjZw@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa15Windows1250CharZQBjFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa15Windows1251CharZQBjFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa15Windows1252CharZQBjFNaNbNiNfKQBoZw@Base 12
+ _D3std8encoding__T6decodeTAxEQBbQBa9AsciiCharZQBcFNaNbNiNfKQBhZw@Base 12
+ _D3std8encoding__T6decodeTAxaZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T6decodeTAxuZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T6decodeTAxwZQmFNaNbNiNfKQqZw@Base 12
+ _D3std8encoding__T6encodeTEQzQx10Latin1CharZQBaFNaNbNiNfwAQBgZm@Base 12
+ _D3std8encoding__T6encodeTEQzQx10Latin2CharZQBaFNaNbNiNfwAQBgZm@Base 12
+ _D3std8encoding__T6encodeTEQzQx15Windows1250CharZQBfFNaNbNiNfwAQBlZm@Base 12
+ _D3std8encoding__T6encodeTEQzQx15Windows1251CharZQBfFNaNbNiNfwAQBlZm@Base 12
+ _D3std8encoding__T6encodeTEQzQx15Windows1252CharZQBfFNaNbNiNfwAQBlZm@Base 12
+ _D3std8encoding__T6encodeTEQzQx9AsciiCharZQyFNaNbNiNfwAQBdZm@Base 12
+ _D3std8encoding__T6encodeTaZQkFNaNbNiNfwAaZm@Base 12
+ _D3std8encoding__T6encodeTuZQkFNaNbNiNfwAuZm@Base 12
+ _D3std8encoding__T6encodeTwZQkFNaNbNiNfwAwZm@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb10Latin1CharZQBfFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb10Latin2CharZQBfFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb15Windows1250CharZQBkFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb15Windows1251CharZQBkFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb15Windows1252CharZQBkFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTEQBcQBb9AsciiCharZQBdFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTaZQnFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTuZQnFNaNbNiNfwZb@Base 12
+ _D3std8encoding__T9canEncodeTwZQnFNaNbNiNfwZb@Base 12
+ _D3std8internal10attributes11__moduleRefZ@Base 12
+ _D3std8internal10attributes12__ModuleInfoZ@Base 12
+ _D3std8internal11scopebuffer11__moduleRefZ@Base 12
+ _D3std8internal11scopebuffer12__ModuleInfoZ@Base 12
+ _D3std8internal12unicode_comp11__moduleRefZ@Base 12
+ _D3std8internal12unicode_comp12__ModuleInfoZ@Base 12
+ _D3std8internal12unicode_comp16compositionTableFNaNbNdNiNfZ1tyASQCkQCj14unicode_tables9CompEntry@Base 12
+ _D3std8internal12unicode_comp16compositionTableFNaNbNdNiNfZyASQCiQCh14unicode_tables9CompEntry@Base 12
+ _D3std8internal12unicode_norm11__moduleRefZ@Base 12
+ _D3std8internal12unicode_norm12__ModuleInfoZ@Base 12
+ _D3std8internal14unicode_decomp11__moduleRefZ@Base 12
+ _D3std8internal14unicode_decomp12__ModuleInfoZ@Base 12
+ _D3std8internal14unicode_decomp16decompCanonTableFNaNbNdNiNfZ1tyAw@Base 12
+ _D3std8internal14unicode_decomp16decompCanonTableFNaNbNdNiNfZyAw@Base 12
+ _D3std8internal14unicode_decomp17decompCompatTableFNaNbNdNiNfZ1tyAw@Base 12
+ _D3std8internal14unicode_decomp17decompCompatTableFNaNbNdNiNfZyAw@Base 12
+ _D3std8internal14unicode_tables10isSpaceGenFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables10isWhiteGenFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables11__moduleRefZ@Base 12
+ _D3std8internal14unicode_tables11isFormatGenFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables12__ModuleInfoZ@Base 12
+ _D3std8internal14unicode_tables12isControlGenFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables12toLowerTableFNaNbNdNiNfZ1tyAk@Base 12
+ _D3std8internal14unicode_tables12toLowerTableFNaNbNdNiNfZyAk@Base 12
+ _D3std8internal14unicode_tables12toTitleTableFNaNbNdNiNfZ1tyAk@Base 12
+ _D3std8internal14unicode_tables12toTitleTableFNaNbNdNiNfZyAk@Base 12
+ _D3std8internal14unicode_tables12toUpperTableFNaNbNdNiNfZ1tyAk@Base 12
+ _D3std8internal14unicode_tables12toUpperTableFNaNbNdNiNfZyAk@Base 12
+ _D3std8internal14unicode_tables13FullCaseEntry5valueMxFNaNbNdNiNjNeZAxw@Base 12
+ _D3std8internal14unicode_tables13FullCaseEntry6__initZ@Base 12
+ _D3std8internal14unicode_tables13fullCaseTableFNaNbNdNiNfZ1tyASQCjQCiQCc13FullCaseEntry@Base 12
+ _D3std8internal14unicode_tables13fullCaseTableFNaNbNdNiNfZyASQChQCgQCa13FullCaseEntry@Base 12
+ _D3std8internal14unicode_tables15SimpleCaseEntry4sizeMxFNaNbNdNiNfZh@Base 12
+ _D3std8internal14unicode_tables15SimpleCaseEntry6__initZ@Base 12
+ _D3std8internal14unicode_tables15SimpleCaseEntry7isLowerMxFNaNbNdNiNfZi@Base 12
+ _D3std8internal14unicode_tables15SimpleCaseEntry7isUpperMxFNaNbNdNiNfZi@Base 12
+ _D3std8internal14unicode_tables15UnicodeProperty11__xopEqualsMxFKxSQCnQCmQCgQBtZb@Base 12
+ _D3std8internal14unicode_tables15UnicodeProperty6__initZ@Base 12
+ _D3std8internal14unicode_tables15UnicodeProperty9__xtoHashFNbNeKxSQCmQClQCfQBsZm@Base 12
+ _D3std8internal14unicode_tables15simpleCaseTableFNaNbNdNiNfZ1tyASQClQCkQCe15SimpleCaseEntry@Base 12
+ _D3std8internal14unicode_tables15simpleCaseTableFNaNbNdNiNfZyASQCjQCiQCc15SimpleCaseEntry@Base 12
+ _D3std8internal14unicode_tables6blocks10DevanagariyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10GlagoliticyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10KharoshthiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10Old_ItalicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10Old_TurkicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10PhoenicianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks10SaurashtrayAh@Base 12
+ _D3std8internal14unicode_tables6blocks11Basic_LatinyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11Box_DrawingyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11CJK_StrokesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11Hangul_JamoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11New_Tai_LueyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11Old_PersianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks11Yi_RadicalsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Domino_TilesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Meetei_MayekyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Number_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Sora_SompengyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Syloti_NagriyAh@Base 12
+ _D3std8internal14unicode_tables6blocks12Yi_SyllablesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks13Khmer_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks13Mahjong_TilesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks13Phaistos_DiscyAh@Base 12
+ _D3std8internal14unicode_tables6blocks13Playing_CardsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14Aegean_NumbersyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14Block_ElementsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14Greek_ExtendedyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14IPA_ExtensionsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14Low_SurrogatesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks14Vertical_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks15Ancient_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks15High_SurrogatesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks15Kana_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks15Kangxi_RadicalsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks15Musical_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Bamum_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Braille_PatternsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Control_PicturesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Currency_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Geometric_ShapesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Greek_and_CopticyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Hangul_SyllablesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Imperial_AramaicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Latin_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Latin_Extended_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Latin_Extended_CyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Latin_Extended_DyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Meroitic_CursiveyAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Private_Use_AreayAh@Base 12
+ _D3std8internal14unicode_tables6blocks16Vedic_ExtensionsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Arabic_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Arabic_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Bopomofo_ExtendedyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17CJK_CompatibilityyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Cypriot_SyllabaryyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Ethiopic_ExtendedyAh@Base 12
+ _D3std8internal14unicode_tables6blocks17Old_South_ArabianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Alchemical_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Latin_1_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Letterlike_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Linear_B_IdeogramsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Linear_B_SyllabaryyAh@Base 12
+ _D3std8internal14unicode_tables6blocks18Myanmar_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Cyrillic_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Cyrillic_Extended_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Cyrillic_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Devanagari_ExtendedyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Ethiopic_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Ethiopic_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19General_PunctuationyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Georgian_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Phonetic_ExtensionsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Small_Form_VariantsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks19Variation_SelectorsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks20Combining_Half_MarksyAh@Base 12
+ _D3std8internal14unicode_tables6blocks20Egyptian_HieroglyphsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks20Meroitic_HieroglyphsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks20Rumi_Numeral_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks20Sundanese_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Ancient_Greek_NumbersyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Counting_Rod_NumeralsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Inscriptional_PahlaviyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Miscellaneous_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Modifier_Tone_LettersyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Supplemental_Arrows_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Supplemental_Arrows_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks21Tai_Xuan_Jing_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks22CJK_Unified_IdeographsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks22Enclosed_AlphanumericsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks22Hangul_Jamo_Extended_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks22Hangul_Jamo_Extended_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks22Inscriptional_ParthianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks22Mathematical_OperatorsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks23CJK_Compatibility_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks23CJK_Radicals_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks23Meetei_Mayek_ExtensionsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks23Miscellaneous_TechnicalyAh@Base 12
+ _D3std8internal14unicode_tables6blocks23Yijing_Hexagram_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks24Spacing_Modifier_LettersyAh@Base 12
+ _D3std8internal14unicode_tables6blocks24Supplemental_PunctuationyAh@Base 12
+ _D3std8internal14unicode_tables6blocks25Byzantine_Musical_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks25Common_Indic_Number_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks25Hangul_Compatibility_JamoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks25Latin_Extended_AdditionalyAh@Base 12
+ _D3std8internal14unicode_tables6blocks25Transport_And_Map_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks27Arabic_Presentation_Forms_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks27Arabic_Presentation_Forms_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks27CJK_Symbols_and_PunctuationyAh@Base 12
+ _D3std8internal14unicode_tables6blocks27Combining_Diacritical_MarksyAh@Base 12
+ _D3std8internal14unicode_tables6blocks27High_Private_Use_SurrogatesyAh@Base 12
+ _D3std8internal14unicode_tables6blocks27Superscripts_and_SubscriptsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks28CJK_Compatibility_IdeographsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks28Katakana_Phonetic_ExtensionsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks29Alphabetic_Presentation_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks29Halfwidth_and_Fullwidth_FormsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks29Optical_Character_RecognitionyAh@Base 12
+ _D3std8internal14unicode_tables6blocks30Ancient_Greek_Musical_NotationyAh@Base 12
+ _D3std8internal14unicode_tables6blocks30Phonetic_Extensions_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks30Variation_Selectors_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks31Enclosed_CJK_Letters_and_MonthsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks31Enclosed_Ideographic_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks32Enclosed_Alphanumeric_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks32Miscellaneous_Symbols_and_ArrowsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks32Supplementary_Private_Use_Area_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks32Supplementary_Private_Use_Area_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks33Cuneiform_Numbers_and_PunctuationyAh@Base 12
+ _D3std8internal14unicode_tables6blocks33Mathematical_Alphanumeric_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks34CJK_Unified_Ideographs_Extension_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks34CJK_Unified_Ideographs_Extension_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks34CJK_Unified_Ideographs_Extension_CyAh@Base 12
+ _D3std8internal14unicode_tables6blocks34CJK_Unified_Ideographs_Extension_DyAh@Base 12
+ _D3std8internal14unicode_tables6blocks34Ideographic_Description_CharactersyAh@Base 12
+ _D3std8internal14unicode_tables6blocks35Supplemental_Mathematical_OperatorsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks36Miscellaneous_Mathematical_Symbols_AyAh@Base 12
+ _D3std8internal14unicode_tables6blocks36Miscellaneous_Mathematical_Symbols_ByAh@Base 12
+ _D3std8internal14unicode_tables6blocks37Miscellaneous_Symbols_And_PictographsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks37Unified_Canadian_Aboriginal_SyllabicsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks38Arabic_Mathematical_Alphabetic_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks38Combining_Diacritical_Marks_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks39CJK_Compatibility_Ideographs_SupplementyAh@Base 12
+ _D3std8internal14unicode_tables6blocks39Combining_Diacritical_Marks_for_SymbolsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks3LaoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks3NKoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks3VaiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks3tabFNaNdNfZAySQBzQByQBs15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables6blocks46Unified_Canadian_Aboriginal_Syllabics_ExtendedyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4ChamyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4LisuyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4MiaoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4TagsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4ThaiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks4_tabyASQBsQBrQBl15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables6blocks5BamumyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5BatakyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5BuhidyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5KhmeryAh@Base 12
+ _D3std8internal14unicode_tables6blocks5LimbuyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5OghamyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5OriyayAh@Base 12
+ _D3std8internal14unicode_tables6blocks5RunicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5TakriyAh@Base 12
+ _D3std8internal14unicode_tables6blocks5TamilyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6ArabicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6ArrowsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6BrahmiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6CarianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6ChakmayAh@Base 12
+ _D3std8internal14unicode_tables6blocks6CopticyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6GothicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6HebrewyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6KaithiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6KanbunyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6LepchayAh@Base 12
+ _D3std8internal14unicode_tables6blocks6LycianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6LydianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6RejangyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6SyriacyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6Tai_LeyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6TeluguyAh@Base 12
+ _D3std8internal14unicode_tables6blocks6ThaanayAh@Base 12
+ _D3std8internal14unicode_tables6blocks6__initZ@Base 12
+ _D3std8internal14unicode_tables6blocks7AvestanyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7BengaliyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7DeseretyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7HanunooyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7KannadayAh@Base 12
+ _D3std8internal14unicode_tables6blocks7MandaicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7MyanmaryAh@Base 12
+ _D3std8internal14unicode_tables6blocks7OsmanyayAh@Base 12
+ _D3std8internal14unicode_tables6blocks7SharadayAh@Base 12
+ _D3std8internal14unicode_tables6blocks7ShavianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7SinhalayAh@Base 12
+ _D3std8internal14unicode_tables6blocks7TagalogyAh@Base 12
+ _D3std8internal14unicode_tables6blocks7TibetanyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8ArmenianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8BalineseyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8BopomofoyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8BugineseyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8CherokeeyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8CyrillicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8DingbatsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8EthiopicyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8GeorgianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8GujaratiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8GurmukhiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8HiraganayAh@Base 12
+ _D3std8internal14unicode_tables6blocks8JavaneseyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8KatakanayAh@Base 12
+ _D3std8internal14unicode_tables6blocks8Kayah_LiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8Ol_ChikiyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8Phags_payAh@Base 12
+ _D3std8internal14unicode_tables6blocks8SpecialsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8TagbanwayAh@Base 12
+ _D3std8internal14unicode_tables6blocks8Tai_ThamyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8Tai_VietyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8TifinaghyAh@Base 12
+ _D3std8internal14unicode_tables6blocks8UgariticyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9CuneiformyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9EmoticonsyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9MalayalamyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9MongolianyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9SamaritanyAh@Base 12
+ _D3std8internal14unicode_tables6blocks9SundaneseyAh@Base 12
+ _D3std8internal14unicode_tables6hangul1LyAh@Base 12
+ _D3std8internal14unicode_tables6hangul1TyAh@Base 12
+ _D3std8internal14unicode_tables6hangul1VyAh@Base 12
+ _D3std8internal14unicode_tables6hangul2LVyAh@Base 12
+ _D3std8internal14unicode_tables6hangul3LVTyAh@Base 12
+ _D3std8internal14unicode_tables6hangul3tabFNaNbNdNiNfZAySQCdQCcQBw15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables6hangul4_tabyASQBsQBrQBl15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables6hangul6__initZ@Base 12
+ _D3std8internal14unicode_tables7isHangLFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables7isHangTFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables7isHangVFNaNbNiNfwZb@Base 12
+ _D3std8internal14unicode_tables7scripts10DevanagariyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10GlagoliticyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10KharoshthiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10Old_ItalicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10Old_TurkicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10PhoenicianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts10SaurashtrayAh@Base 12
+ _D3std8internal14unicode_tables7scripts11New_Tai_LueyAh@Base 12
+ _D3std8internal14unicode_tables7scripts11Old_PersianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts12Meetei_MayekyAh@Base 12
+ _D3std8internal14unicode_tables7scripts12Sora_SompengyAh@Base 12
+ _D3std8internal14unicode_tables7scripts12Syloti_NagriyAh@Base 12
+ _D3std8internal14unicode_tables7scripts16Imperial_AramaicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts16Meroitic_CursiveyAh@Base 12
+ _D3std8internal14unicode_tables7scripts17Old_South_ArabianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts19Canadian_AboriginalyAh@Base 12
+ _D3std8internal14unicode_tables7scripts20Egyptian_HieroglyphsyAh@Base 12
+ _D3std8internal14unicode_tables7scripts20Meroitic_HieroglyphsyAh@Base 12
+ _D3std8internal14unicode_tables7scripts21Inscriptional_PahlaviyAh@Base 12
+ _D3std8internal14unicode_tables7scripts22Inscriptional_ParthianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts2YiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts3HanyAh@Base 12
+ _D3std8internal14unicode_tables7scripts3LaoyAh@Base 12
+ _D3std8internal14unicode_tables7scripts3NkoyAh@Base 12
+ _D3std8internal14unicode_tables7scripts3VaiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts3tabFNaNbNdNiNfZAySQCeQCdQBx15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables7scripts4ChamyAh@Base 12
+ _D3std8internal14unicode_tables7scripts4LisuyAh@Base 12
+ _D3std8internal14unicode_tables7scripts4MiaoyAh@Base 12
+ _D3std8internal14unicode_tables7scripts4ThaiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts4_tabyASQBtQBsQBm15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables7scripts5BamumyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5BatakyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5BuhidyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5GreekyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5KhmeryAh@Base 12
+ _D3std8internal14unicode_tables7scripts5LatinyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5LimbuyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5OghamyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5OriyayAh@Base 12
+ _D3std8internal14unicode_tables7scripts5RunicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5TakriyAh@Base 12
+ _D3std8internal14unicode_tables7scripts5TamilyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6ArabicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6BrahmiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6CarianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6ChakmayAh@Base 12
+ _D3std8internal14unicode_tables7scripts6CommonyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6CopticyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6GothicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6HangulyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6HebrewyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6KaithiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6LepchayAh@Base 12
+ _D3std8internal14unicode_tables7scripts6LycianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6LydianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6RejangyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6SyriacyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6Tai_LeyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6TeluguyAh@Base 12
+ _D3std8internal14unicode_tables7scripts6ThaanayAh@Base 12
+ _D3std8internal14unicode_tables7scripts6__initZ@Base 12
+ _D3std8internal14unicode_tables7scripts7AvestanyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7BengaliyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7BrailleyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7CypriotyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7DeseretyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7HanunooyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7KannadayAh@Base 12
+ _D3std8internal14unicode_tables7scripts7MandaicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7MyanmaryAh@Base 12
+ _D3std8internal14unicode_tables7scripts7OsmanyayAh@Base 12
+ _D3std8internal14unicode_tables7scripts7SharadayAh@Base 12
+ _D3std8internal14unicode_tables7scripts7ShavianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7SinhalayAh@Base 12
+ _D3std8internal14unicode_tables7scripts7TagalogyAh@Base 12
+ _D3std8internal14unicode_tables7scripts7TibetanyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8ArmenianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8BalineseyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8BopomofoyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8BugineseyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8CherokeeyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8CyrillicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8EthiopicyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8GeorgianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8GujaratiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8GurmukhiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8HiraganayAh@Base 12
+ _D3std8internal14unicode_tables7scripts8JavaneseyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8KatakanayAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Kayah_LiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Linear_ByAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Ol_ChikiyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Phags_PayAh@Base 12
+ _D3std8internal14unicode_tables7scripts8TagbanwayAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Tai_ThamyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8Tai_VietyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8TifinaghyAh@Base 12
+ _D3std8internal14unicode_tables7scripts8UgariticyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9CuneiformyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9InheritedyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9MalayalamyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9MongolianyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9SamaritanyAh@Base 12
+ _D3std8internal14unicode_tables7scripts9SundaneseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps10AlphabeticyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps10DeprecatedyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps10Other_MathyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps11ID_ContinueyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps11IdeographicyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps11Soft_DottedyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps11White_SpaceyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps12Bidi_ControlyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps12Join_ControlyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps12XID_ContinueyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps13Grapheme_BaseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps13Grapheme_LinkyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps14Case_IgnorableyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps14Other_ID_StartyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps14Pattern_SyntaxyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps14Quotation_MarkyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps15ASCII_Hex_DigityAh@Base 12
+ _D3std8internal14unicode_tables8uniProps15Grapheme_ExtendyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps15Other_LowercaseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps15Other_UppercaseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps16Other_AlphabeticyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps17Other_ID_ContinueyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps17Unified_IdeographyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps18Variation_SelectoryAh@Base 12
+ _D3std8internal14unicode_tables8uniProps19IDS_Binary_OperatoryAh@Base 12
+ _D3std8internal14unicode_tables8uniProps19Pattern_White_SpaceyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps20IDS_Trinary_OperatoryAh@Base 12
+ _D3std8internal14unicode_tables8uniProps20Terminal_PunctuationyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps21Other_Grapheme_ExtendyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps23Logical_Order_ExceptionyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps23Noncharacter_Code_PointyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps28Default_Ignorable_Code_PointyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2CcyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2CfyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2CnyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2CoyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2CsyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2LlyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2LmyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2LoyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2LtyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2LuyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2McyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2MeyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2MnyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2NdyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2NlyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2NoyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PcyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PdyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PeyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PfyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PiyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PoyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2PsyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2ScyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2SkyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2SmyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2SoyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2ZlyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2ZpyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps2ZsyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps34Other_Default_Ignorable_Code_PointyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps3tabFNaNdNfZAySQCbQCaQBu15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables8uniProps4DashyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps4MathyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps4_tabyASQBuQBtQBn15UnicodeProperty@Base 12
+ _D3std8internal14unicode_tables8uniProps5CasedyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps5STermyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps6HyphenyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps6__initZ@Base 12
+ _D3std8internal14unicode_tables8uniProps7RadicalyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps8ExtenderyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps8ID_StartyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps9DiacriticyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps9Hex_DigityAh@Base 12
+ _D3std8internal14unicode_tables8uniProps9LowercaseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps9UppercaseyAh@Base 12
+ _D3std8internal14unicode_tables8uniProps9XID_StartyAh@Base 12
+ _D3std8internal14unicode_tables9CompEntry6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii7Vii4Vii4Vii6ZQBd11__xopEqualsMxFKxSQDfQDeQCy__TQClTbVii7Vii4Vii4Vii6ZQDhZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii7Vii4Vii4Vii6ZQBd6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii7Vii4Vii4Vii6ZQBd9__xtoHashFNbNeKxSQDeQDdQCx__TQCkTbVii7Vii4Vii4Vii6ZQDgZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii4Vii9ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgTbVii8Vii4Vii9ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii4Vii9ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii4Vii9ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfTbVii8Vii4Vii9ZQCxZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii5Vii8ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgTbVii8Vii5Vii8ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii5Vii8ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii5Vii8ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfTbVii8Vii5Vii8ZQCxZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii6Vii7ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgTbVii8Vii6Vii7ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii6Vii7ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTbVii8Vii6Vii7ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfTbVii8Vii6Vii7ZQCxZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryThVii8Vii7Vii6ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgThVii8Vii7Vii6ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryThVii8Vii7Vii6ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryThVii8Vii7Vii6ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfThVii8Vii7Vii6ZQCxZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii12Vii9ZQw11__xopEqualsMxFKxSQCxQCwQCq__TQCdTtVii12Vii9ZQCsZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii12Vii9ZQw6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii12Vii9ZQw9__xtoHashFNbNeKxSQCwQCvQCp__TQCcTtVii12Vii9ZQCrZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii7Vii6ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgTtVii8Vii7Vii6ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii7Vii6ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii7Vii6ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfTtVii8Vii7Vii6ZQCxZm@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii8Vii5ZQz11__xopEqualsMxFKxSQDaQCzQCt__TQCgTtVii8Vii8Vii5ZQCyZb@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii8Vii5ZQz6__initZ@Base 12
+ _D3std8internal14unicode_tables__T9TrieEntryTtVii8Vii8Vii5ZQz9__xtoHashFNbNeKxSQCzQCyQCs__TQCfTtVii8Vii8Vii5ZQCxZm@Base 12
+ _D3std8internal16unicode_grapheme11__moduleRefZ@Base 12
+ _D3std8internal16unicode_grapheme12__ModuleInfoZ@Base 12
+ _D3std8internal4math11biguintcore10inplaceSubFNaNbNfAkAxkQdZb@Base 12
+ _D3std8internal4math11biguintcore11__moduleRefZ@Base 12
+ _D3std8internal4math11biguintcore11blockDivModFNaNbNfAkQcIAkZv@Base 12
+ _D3std8internal4math11biguintcore11includeSignFNaNbNfMAxkmbZAk@Base 12
+ _D3std8internal4math11biguintcore11mulInternalFNaNbNfAkAxkQdZv@Base 12
+ _D3std8internal4math11biguintcore12__ModuleInfoZ@Base 12
+ _D3std8internal4math11biguintcore12biguintToHexFNaNbNfNkMAaMxAkaEQCl5ascii10LetterCaseZQBe@Base 12
+ _D3std8internal4math11biguintcore12mulKaratsubaFNaNbNfAkAxkQdQhZv@Base 12
+ _D3std8internal4math11biguintcore12squareSimpleFNaNbNfAkAxkZv@Base 12
+ _D3std8internal4math11biguintcore13getCacheLimitFNaNbNiNfZm@Base 12
+ _D3std8internal4math11biguintcore14biguintToOctalFNaNbNiNfAaAxkZm@Base 12
+ _D3std8internal4math11biguintcore14divModInternalFNaNbNfAkQcxAkxQdZv@Base 12
+ _D3std8internal4math11biguintcore14itoaZeroPaddedFNaNbNiNfAakZv@Base 12
+ _D3std8internal4math11biguintcore14squareInternalFNaNbNfAkxAkZv@Base 12
+ _D3std8internal4math11biguintcore14twosComplementFNaNbNfAxkAkZv@Base 12
+ _D3std8internal4math11biguintcore15addAssignSimpleFNaNbNfAkAxkZk@Base 12
+ _D3std8internal4math11biguintcore15adjustRemainderFNaNbNfAkQcAxklQibZv@Base 12
+ _D3std8internal4math11biguintcore15recursiveDivModFNaNbNfAkQcAxkQhbZv@Base 12
+ _D3std8internal4math11biguintcore15squareKaratsubaFNaNbNfAkxAkQfZv@Base 12
+ _D3std8internal4math11biguintcore15subAssignSimpleFNaNbNfAkAxkZk@Base 12
+ _D3std8internal4math11biguintcore15toHexZeroPaddedFNaNbNfAakEQCh5ascii10LetterCaseZ14lowerHexDigitsyAa@Base 12
+ _D3std8internal4math11biguintcore15toHexZeroPaddedFNaNbNfAakEQCh5ascii10LetterCaseZ14upperHexDigitsyAa@Base 12
+ _D3std8internal4math11biguintcore15toHexZeroPaddedFNaNbNfAakEQCh5ascii10LetterCaseZv@Base 12
+ _D3std8internal4math11biguintcore16biguintToDecimalFNaNbNfAaAkZm@Base 12
+ _D3std8internal4math11biguintcore16schoolbookDivModFNaNbNfAkQcIAkZv@Base 12
+ _D3std8internal4math11biguintcore17firstNonZeroDigitFNaNbNiNfxAkZi@Base 12
+ _D3std8internal4math11biguintcore18removeLeadingZerosFNaNbNfNkMANgkZQf@Base 12
+ _D3std8internal4math11biguintcore20addOrSubAssignSimpleFNaNbNfAkAxkbZk@Base 12
+ _D3std8internal4math11biguintcore21highestDifferentDigitFNaNbNiNfxAkxQdZm@Base 12
+ _D3std8internal4math11biguintcore24highestPowerBelowUintMaxFNaNbNfkZ6maxpwryG22h@Base 12
+ _D3std8internal4math11biguintcore24highestPowerBelowUintMaxFNaNbNfkZi@Base 12
+ _D3std8internal4math11biguintcore25highestPowerBelowUlongMaxFNaNbNfkZ6maxpwryG39h@Base 12
+ _D3std8internal4math11biguintcore25highestPowerBelowUlongMaxFNaNbNfkZi@Base 12
+ _D3std8internal4math11biguintcore25karatsubaRequiredBuffSizeFNaNbNfmZm@Base 12
+ _D3std8internal4math11biguintcore3ONEyAk@Base 12
+ _D3std8internal4math11biguintcore3TENyAk@Base 12
+ _D3std8internal4math11biguintcore3TWOyAk@Base 12
+ _D3std8internal4math11biguintcore3addFNaNbNfMxAkMxQeZAk@Base 12
+ _D3std8internal4math11biguintcore3subFNaNbNfMxAkMxQePbZAk@Base 12
+ _D3std8internal4math11biguintcore4ZEROyAk@Base 12
+ _D3std8internal4math11biguintcore4lessFNaNbNfAxkQdZb@Base 12
+ _D3std8internal4math11biguintcore6addIntFNaNbNfxAkmZAk@Base 12
+ _D3std8internal4math11biguintcore6subIntFNaNbNfxAkmZAk@Base 12
+ _D3std8internal4math11biguintcore7BigUint10uintLengthMxFNaNbNdNiNlNfZm@Base 12
+ _D3std8internal4math11biguintcore7BigUint11__invariantMxFNaZv@Base 12
+ _D3std8internal4math11biguintcore7BigUint11__xopEqualsMxFKxSQCgQCfQBzQBxQBnZb@Base 12
+ _D3std8internal4math11biguintcore7BigUint11toHexStringMxFNaNbNlNfiaiaEQCq5ascii10LetterCaseZAa@Base 12
+ _D3std8internal4math11biguintcore7BigUint11ulongLengthMxFNaNbNdNiNlNfZm@Base 12
+ _D3std8internal4math11biguintcore7BigUint12__invariant0MxFNaZv@Base 12
+ _D3std8internal4math11biguintcore7BigUint13toOctalStringMxFNaNbNlNfZAa@Base 12
+ _D3std8internal4math11biguintcore7BigUint15__funcliteral34FNaNbNiNeAkZAyk@Base 12
+ _D3std8internal4math11biguintcore7BigUint15toDecimalStringMxFNaNbNlNfiZAa@Base 12
+ _D3std8internal4math11biguintcore7BigUint3divFNaNbNfNkMSQCcQCbQBvQBtQBjMQrZQu@Base 12
+ _D3std8internal4math11biguintcore7BigUint3modFNaNbNfNkMSQCcQCbQBvQBtQBjMQrZQu@Base 12
+ _D3std8internal4math11biguintcore7BigUint3mulFNaNbNfMSQCaQBzQBtQBrQBhMQrZQu@Base 12
+ _D3std8internal4math11biguintcore7BigUint3powFNaNbNfNkMSQCcQCbQBvQBtQBjmZQs@Base 12
+ _D3std8internal4math11biguintcore7BigUint6__ctorMFNaNbNcNiNfNkMAykZSQCoQCnQChQCfQBv@Base 12
+ _D3std8internal4math11biguintcore7BigUint6__initZ@Base 12
+ _D3std8internal4math11biguintcore7BigUint6divModFNaNbNfSQCcQCbQBvQBtQBjMQrJQuJQxZv@Base 12
+ _D3std8internal4math11biguintcore7BigUint6isZeroMxFNaNbNiNlNfZb@Base 12
+ _D3std8internal4math11biguintcore7BigUint6toHashMxFNaNbNiNlNfZm@Base 12
+ _D3std8internal4math11biguintcore7BigUint8__xopCmpMxFKxSQCcQCbQBvQBtQBjZi@Base 12
+ _D3std8internal4math11biguintcore7BigUint8addOrSubFNaNbNfMSQCfQCeQByQBwQBmMQrbKbZQx@Base 12
+ _D3std8internal4math11biguintcore7BigUint8numBytesMxFNaNbNiNlNfZm@Base 12
+ _D3std8internal4math11biguintcore7BigUint8peekUintMxFNaNbNiNlNfmZk@Base 12
+ _D3std8internal4math11biguintcore7BigUint9peekUlongMxFNaNbNiNlNfmZm@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T5opCmpTvZQjMxFNaNbNiNlNfxSQCqQCpQCjQChQBxZi@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T6divIntTykZQlFNaNbNfNkMSQCoQCnQChQCfQBvykZQt@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T6modIntTykZQlFNaNbNfMSQCmQClQCfQCdQBtykZk@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opAssignTmZQmMFNaNbNlNfmZv@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opAssignTvZQmMFNaNbNiNlNfSQCrQCqQCkQCiQByZv@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opBinaryVAyaa2_3c3cTmZQxMxFNaNbNlNfmZSQDdQDcQCwQCuQCk@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opBinaryVAyaa2_3e3eTmZQxMxFNaNbNjNfmZSQDdQDcQCwQCuQCk@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opEqualsTvZQmMxFNaNbNiNlNfKxSQCuQCtQCnQClQCbZb@Base 12
+ _D3std8internal4math11biguintcore7BigUint__T8opEqualsTvZQmMxFNaNbNiNlNfmZb@Base 12
+ _D3std8internal4math11biguintcore9addSimpleFNaNbNfAkxAkxQdZk@Base 12
+ _D3std8internal4math11biguintcore9mulSimpleFNaNbNfAkAxkQdZv@Base 12
+ _D3std8internal4math11biguintcore9subSimpleFNaNbAkAxkQdZk@Base 12
+ _D3std8internal4math11biguintcore__T6intpowTkZQkFNaNbNiNfkmZk@Base 12
+ _D3std8internal4math12biguintnoasm11__moduleRefZ@Base 12
+ _D3std8internal4math12biguintnoasm12__ModuleInfoZ@Base 12
+ _D3std8internal4math12biguintnoasm12multibyteMulFNaNbNiNfAkAxkkkZk@Base 12
+ _D3std8internal4math12biguintnoasm12multibyteShlFNaNbNiNfAkAxkkZk@Base 12
+ _D3std8internal4math12biguintnoasm12multibyteShrFNaNbNiNfAkAxkkZv@Base 12
+ _D3std8internal4math12biguintnoasm15multibyteSquareFNaNbNiNfAkAxkZv@Base 12
+ _D3std8internal4math12biguintnoasm18multibyteDivAssignFNaNbNiNfAkkkZk@Base 12
+ _D3std8internal4math12biguintnoasm27multibyteAddDiagonalSquaresFNaNbNiNfAkAxkZv@Base 12
+ _D3std8internal4math12biguintnoasm27multibyteMultiplyAccumulateFNaNbNiNfAkAxkQdZv@Base 12
+ _D3std8internal4math12biguintnoasm27multibyteTriangleAccumulateFNaNbNiNfAkAxkZv@Base 12
+ _D3std8internal4math12biguintnoasm__T15multibyteAddSubVai43ZQxFNaNbNiNfAkAxkQdkZk@Base 12
+ _D3std8internal4math12biguintnoasm__T15multibyteAddSubVai45ZQxFNaNbNiNfAkAxkQdkZk@Base 12
+ _D3std8internal4math12biguintnoasm__T15multibyteMulAddVai43ZQxFNaNbNiNfAkAxkkkZk@Base 12
+ _D3std8internal4math12biguintnoasm__T15multibyteMulAddVai45ZQxFNaNbNiNfAkAxkkkZk@Base 12
+ _D3std8internal4math12biguintnoasm__T24multibyteIncrementAssignVai43ZQBgFNaNbNiNfAkkZk@Base 12
+ _D3std8internal4math12biguintnoasm__T24multibyteIncrementAssignVai45ZQBgFNaNbNiNfAkkZk@Base 12
+ _D3std8internal4math13errorfunction11__moduleRefZ@Base 12
+ _D3std8internal4math13errorfunction12__ModuleInfoZ@Base 12
+ _D3std8internal4math13errorfunction1PyG10e@Base 12
+ _D3std8internal4math13errorfunction1QyG11e@Base 12
+ _D3std8internal4math13errorfunction1RyG5e@Base 12
+ _D3std8internal4math13errorfunction1SyG6e@Base 12
+ _D3std8internal4math13errorfunction1TyG7e@Base 12
+ _D3std8internal4math13errorfunction1UyG7e@Base 12
+ _D3std8internal4math13errorfunction22normalDistributionImplFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2P0yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2P1yG10e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2P2yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2P3yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2Q0yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2Q1yG10e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2Q2yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZ2Q3yG8e@Base 12
+ _D3std8internal4math13errorfunction25normalDistributionInvImplFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13errorfunction3erfFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13errorfunction4erfcFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13errorfunction5EXP_2ye@Base 12
+ _D3std8internal4math13errorfunction5erfceFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13errorfunction5expx2FNaNbNiNfeiZe@Base 12
+ _D3std8internal4math13errorfunction__T12rationalPolyTeZQrFNaNbNiNfeAxeQdZe@Base 12
+ _D3std8internal4math13gammafunction10EULERGAMMAye@Base 12
+ _D3std8internal4math13gammafunction11__moduleRefZ@Base 12
+ _D3std8internal4math13gammafunction11logmdigammaFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13gammafunction12__ModuleInfoZ@Base 12
+ _D3std8internal4math13gammafunction13gammaStirlingFNaNbNiNfeZ19LargeStirlingCoeffsyG7e@Base 12
+ _D3std8internal4math13gammafunction13gammaStirlingFNaNbNiNfeZ19SmallStirlingCoeffsyG9e@Base 12
+ _D3std8internal4math13gammafunction13gammaStirlingFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13gammafunction14betaIncompleteFNaNbNiNfeeeZe@Base 12
+ _D3std8internal4math13gammafunction15gammaIncompleteFNaNbNiNfeeZe@Base 12
+ _D3std8internal4math13gammafunction16GammaSmallCoeffsyG9e@Base 12
+ _D3std8internal4math13gammafunction16igammaTemmeLargeFNaNbNiNfeeZ4coefyG13Ae@Base 12
+ _D3std8internal4math13gammafunction16igammaTemmeLargeFNaNbNiNfeeZe@Base 12
+ _D3std8internal4math13gammafunction17betaIncompleteInvFNaNbNiNfeeeZe@Base 12
+ _D3std8internal4math13gammafunction17logGammaNumeratoryG7e@Base 12
+ _D3std8internal4math13gammafunction18betaDistExpansion1FNaNbNiNfeeeZe@Base 12
+ _D3std8internal4math13gammafunction18betaDistExpansion2FNaNbNiNfeeeZe@Base 12
+ _D3std8internal4math13gammafunction18logmdigammaInverseFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13gammafunction19GammaSmallNegCoeffsyG9e@Base 12
+ _D3std8internal4math13gammafunction19betaDistPowerSeriesFNaNbNiNfeeeZe@Base 12
+ _D3std8internal4math13gammafunction19logGammaDenominatoryG8e@Base 12
+ _D3std8internal4math13gammafunction20GammaNumeratorCoeffsyG8e@Base 12
+ _D3std8internal4math13gammafunction20gammaIncompleteComplFNaNbNiNfeeZe@Base 12
+ _D3std8internal4math13gammafunction22GammaDenominatorCoeffsyG9e@Base 12
+ _D3std8internal4math13gammafunction22logGammaStirlingCoeffsyG7e@Base 12
+ _D3std8internal4math13gammafunction23gammaIncompleteComplInvFNaNbNiNfeeZe@Base 12
+ _D3std8internal4math13gammafunction4Bn_nyG7e@Base 12
+ _D3std8internal4math13gammafunction5gammaFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13gammafunction7digammaFNaNbNiNfeZe@Base 12
+ _D3std8internal4math13gammafunction8logGammaFNaNbNiNfeZe@Base 12
+ _D3std8internal4test10dummyrange11__moduleRefZ@Base 12
+ _D3std8internal4test10dummyrange12__ModuleInfoZ@Base 12
+ _D3std8internal4test10dummyrange7TestFoo6__initZ@Base 12
+ _D3std8internal4test10dummyrange7TestFoo8opEqualsMxFKxSQCbQCaQBuQBsQBjZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei1TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei1TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni0VQDui0VQDdi1TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei2TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei2TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni0VQDui0VQDdi2TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei3TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm4backMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi0VQDvi0VQDei3TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm5frontMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opIndexMNgFNaNbNcNiNfmZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opSliceMFNaNbNiNfZSQFqQFpQFjQFh__TQEyVQEpi0VQDwi0VQDfi3TQClZQFy@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opSliceMFNaNbNiNfmmZSQFsQFrQFlQFj__TQFaVQEri0VQDyi0VQDhi3TQCnZQGa@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni0VQDui0VQDdi3TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi0VQDvi1VQDei1TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi0VQDvi1VQDei1TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni0VQDui1VQDdi1TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi0VQDvi1VQDei2TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi0VQDvi1VQDei2TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMNgFNaNbNcNdNiNfZNgk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi0VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni0VQDui1VQDdi2TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei0TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei0TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui0VQDdi0TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei1TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei1TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei1TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui0VQDdi1TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei2TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei2TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei2TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui0VQDdi2TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei3TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm13opIndexAssignMFNaNbNiNfkmZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm4backMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm4backMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi1VQDvi0VQDei3TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6lengthMxFNaNbNdNiNfZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opIndexMxFNaNbNiNfmZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opSliceMFNaNbNiNfZSQFqQFpQFjQFh__TQEyVQEpi1VQDwi0VQDfi3TQClZQFy@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7opSliceMFNaNbNiNfmmZSQFsQFrQFlQFj__TQFaVQEri1VQDyi0VQDhi3TQCnZQGa@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi0VEQDrQDqQDkQDi9RangeTypei3TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui0VQDdi3TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi1VQDei0TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei0TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui1VQDdi0TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi1VQDei1TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi1VQDvi1VQDei1TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei1TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui1VQDdi1TQCjZQFwZm@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm11__xopEqualsMxFKxSQFpQFoQFiQFg__TQExVQEoi1VQDvi1VQDei2TQCkZQFxZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm12uinttestDatayAk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4backMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm4saveMFNaNbNdNiNfZSQFpQFoQFiQFg__TQExVQEoi1VQDvi1VQDei2TQCkZQFx@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5emptyMxFNaNbNdNiNfZb@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMFNaNbNdNiNfkZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm5frontMxFNaNbNdNiNfZk@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6__initZ@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm6reinitMFNaNbNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm7popBackMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm8popFrontMFNaNbNiNfZv@Base 12
+ _D3std8internal4test10dummyrange__T10DummyRangeVEQBvQBuQBoQBm8ReturnByi1VEQCuQCtQCnQCl6Lengthi1VEQDrQDqQDkQDi9RangeTypei2TAkZQDm9__xtoHashFNbNeKxSQFoQFnQFhQFf__TQEwVQEni1VQDui1VQDdi2TQCjZQFwZm@Base 12
+ _D3std8internal4test3uda11__moduleRefZ@Base 12
+ _D3std8internal4test3uda12__ModuleInfoZ@Base 12
+ _D3std8internal4test3uda17HasPrivateMembers6__initZ@Base 12
+ _D3std8internal4test5range11__moduleRefZ@Base 12
+ _D3std8internal4test5range12__ModuleInfoZ@Base 12
+ _D3std8internal6memory11__moduleRefZ@Base 12
+ _D3std8internal6memory12__ModuleInfoZ@Base 12
+ _D3std8internal6memory__T13enforceCallocZQqFNaNbNiNfmmZPv@Base 12
+ _D3std8internal6memory__T13enforceMallocZQqFNaNbNiNfmZPv@Base 12
+ _D3std8internal6memory__T14enforceReallocZQrFNaNbNiNkMPvmZQe@Base 12
+ _D3std8internal7cstring11__moduleRefZ@Base 12
+ _D3std8internal7cstring12__ModuleInfoZ@Base 12
+ _D3std8internal7cstring__T11tempCStringTaTANgaZQvFNaNbNiNfMQrZSQCjQCiQCc__T17TempCStringBufferTaZQw@Base 12
+ _D3std8internal7cstring__T11tempCStringTaTAxaZQuFNaNbNiNfMQqZSQCiQChQCb__T17TempCStringBufferTaZQw@Base 12
+ _D3std8internal7cstring__T11tempCStringTaTAyaZQuFNaNbNiNfMQqZSQCiQChQCb__T17TempCStringBufferTaZQw@Base 12
+ _D3std8internal7cstring__T11tempCStringTaTSQBp4path__T16asNormalizedPathTSQCu5range__T5chainTSQDo3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQFnQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFoFNkMQFcZQtZQHeFNaNbNiMQGzZSQIsQIrQIl__T17TempCStringBufferTaZQw@Base 12
+ _D3std8internal7cstring__T11tempCStringTaTSQBp5range__T5chainTSQCj3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQEiQCt__T10OnlyResultTaZQpTQDbZQDmFQDjQBnQDpZ6ResultZQFlFNaNbNiNfMQFiZSQHbQHaQGu__T17TempCStringBufferTaZQw@Base 12
+ _D3std8internal7cstring__T14trustedReallocTaZQtFNaNbNiNeNkMAaZQd@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw15trustedVoidInitFNaNbNiNeZSQCyQCxQCr__TQCmTaZQCs@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw3ptrMxFNaNbNdNiNeZPxa@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw6__dtorMFNaNbNiNeZv@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw6__initZ@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw7buffPtrMNgFNaNbNdNiNjNeZPNga@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw7opIndexMxFNaNbNiNeZAxa@Base 12
+ _D3std8internal7cstring__T17TempCStringBufferTaZQw8opAssignMFNaNbNcNiNjNeSQCuQCtQCn__TQCiTaZQCoZQx@Base 12
+ _D3std8internal7cstring__T19trustedReallocStackTaZQyFNaNbNiNeMAamZQe@Base 12
+ _D3std8internal7windows8advapi3211__moduleRefZ@Base 12
+ _D3std8internal7windows8advapi3212__ModuleInfoZ@Base 12
+ _D3std8typecons10Structural11__InterfaceZ@Base 12
+ _D3std8typecons11__moduleRefZ@Base 12
+ _D3std8typecons12__ModuleInfoZ@Base 12
+ _D3std8typecons19NotImplementedError6__ctorMFNaNbNfAyaZCQCcQCbQBv@Base 12
+ _D3std8typecons19NotImplementedError6__initZ@Base 12
+ _D3std8typecons19NotImplementedError6__vtblZ@Base 12
+ _D3std8typecons19NotImplementedError7__ClassZ@Base 12
+ _D3std8typecons2No6__initZ@Base 12
+ _D3std8typecons3Yes6__initZ@Base 12
+ _D3std8typecons7Ternary4makeFNaNbNiNfhZSQBmQBlQBf@Base 12
+ _D3std8typecons7Ternary6__ctorMFNaNbNcNiNfbZSQBrQBqQBk@Base 12
+ _D3std8typecons7Ternary6__ctorMFNaNbNcNiNfxSQBqQBpQBjZSQCbQCaQBu@Base 12
+ _D3std8typecons7Ternary6__initZ@Base 12
+ _D3std8typecons7Ternary8opAssignMFNaNbNiNfbZv@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj11__xopEqualsMxFKxSQDvQDu__TQDoTxQDfZQDxZb@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj6__initZ@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin13getMNgFNaNbNdNiNjNeZNgxCQEkQDfQDcQCw__TQCwTaZQDc@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin16__ctorMFNaNbNcNiNfxQDbZSQEkQEj__TQEdTxQDuZQEm@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin18opAssignMFNaNbNiNeSQEfQEe__TQDyTxQDpZQEhZv@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin18opAssignMFNaNbNiNfNkMxQDeZv@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin18opEqualsMxFxQCuZb@Base 12
+ _D3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj8__mixin1__T8opEqualsZQkMxFKxSQEgQEf__TQDzTxQDqZQEiZb@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu11__xopEqualsMxFKxSQDgQDf__TQCzTyQCqZQDiZb@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu6__initZ@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin13getMNgFNaNbNdNiNjNeZyQCo@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin16__ctorMFNaNbNcNiNfyQCmZSQDvQDu__TQDoTyQDfZQDx@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin18opAssignMFNaNbNiNeSQDqQDp__TQDjTyQDaZQDsZv@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin18opAssignMFNaNbNiNfNkMyQCpZv@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin18opEqualsMxFxCQDkQCfQBzQBtZb@Base 12
+ _D3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu8__mixin1__T8opEqualsZQkMxFKxSQDrQDq__TQDkTyQDbZQDtZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu10__postblitMFNaNbNiNfZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore13allocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore13isInitializedMxFNaNbNdNiNfZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore15deallocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore4moveMFNaNbNiKQDpZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore8refCountMxFNaNbNdNiNfZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk11__fieldDtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk11__xopEqualsMxFKxSQFaQEz__TQEtTQEkVQDri1ZQFhQCnQEjZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk8opAssignMFNcNjSQExQEw__TQEqTQEhVQDoi1ZQFeQCkQEgZQBi@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCk9__xtoHashFNbNeKxSQEzQEy__TQEsTQEjVQDqi1ZQFgQCmQEiZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore__T10initializeZQnMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15RefCountedStore__T17ensureInitializedZQuMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu15refCountedStoreMNgFNaNbNcNdNiNfZNgSQEyQEx__TQErTQEiVQDpi1ZQFf15RefCountedStore@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu17refCountedPayloadMFNaNbNcNdNiNjZQDs@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu17refCountedPayloadMNgFNaNbNcNdNiNjNfZNgSQFcQDyQDxQDvQDu@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu6__ctorMFNcQCvZSQEdQEc__TQDwTQDnVQCui1ZQEk@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu6__dtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu8opAssignMFQCvZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl3FTP4ImplVEQCbQCa24RefCountedAutoInitializei1ZQCu8opAssignMFSQDzQDy__TQDsTQDjVQCqi1ZQEgZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv10__postblitMFNaNbNiNfZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore13allocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore13isInitializedMxFNaNbNdNiNfZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore15deallocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore4moveMFNaNbNiKQDqZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore8refCountMxFNaNbNdNiNfZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk11__fieldDtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk11__xopEqualsMxFKxSQFbQFa__TQEuTQElVQDri1ZQFiQCnQEjZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk8opAssignMFNcNjSQEyQEx__TQErTQEiVQDoi1ZQFfQCkQEgZQBi@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk9__xtoHashFNbNeKxSQFaQEz__TQEtTQEkVQDqi1ZQFhQCmQEiZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore__T10initializeZQnMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore__T17ensureInitializedZQuMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15refCountedStoreMNgFNaNbNcNdNiNfZNgSQEzQEy__TQEsTQEjVQDpi1ZQFg15RefCountedStore@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv17refCountedPayloadMFNaNbNcNdNiNjZQDt@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv17refCountedPayloadMNgFNaNbNcNdNiNjNfZNgSQFdQDzQDyQDwQDu@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__ctorMFNcQCwZSQEeQEd__TQDxTQDoVQCui1ZQEl@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__dtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv8opAssignMFQCwZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4HTTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv8opAssignMFSQEaQDz__TQDtTQDkVQCqi1ZQEhZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv10__postblitMFNaNbNiNfZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore13allocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore13isInitializedMxFNaNbNdNiNfZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore15deallocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore4moveMFNaNbNiKQDqZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore8refCountMxFNaNbNdNiNfZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk11__fieldDtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCk8opAssignMFNcNjSQEyQEx__TQErTQEiVQDoi1ZQFfQCkQEgZQBi@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore__T10initializeZQnMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15RefCountedStore__T17ensureInitializedZQuMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv15refCountedStoreMNgFNaNbNcNdNiNfZNgSQEzQEy__TQEsTQEjVQDpi1ZQFg15RefCountedStore@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv17refCountedPayloadMFNaNbNcNdNiNjZQDt@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv17refCountedPayloadMNgFNaNbNcNdNiNjNfZNgSQFdQDzQDyQDwQDu@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__ctorMFNcQCwZSQEeQEd__TQDxTQDoVQCui1ZQEl@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__dtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv8opAssignMFQCwZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe3net4curl4SMTP4ImplVEQCcQCb24RefCountedAutoInitializei1ZQCv8opAssignMFSQEaQDz__TQDtTQDkVQCqi1ZQEhZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy10__postblitMFNaNbNiNfZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore13allocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore13isInitializedMxFNaNbNdNiNfZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore15deallocateStoreMFNaNbNiZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl11__fieldDtorMFNeZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl11__xopEqualsMxFKxSQFgQFf__TQEzTQEqVQDti0ZQFnQCpQCbZb@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl8opAssignMFNcNjNeSQFfQFe__TQEyTQEpVQDsi0ZQFmQCoQCaZQBi@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4Impl9__xtoHashFNbNeKxSQFfQFe__TQEyTQEpVQDsi0ZQFmQCoQCaZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4moveMFNaNbNiKQDtZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore8refCountMxFNaNbNdNiNfZm@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore__T10initializeTAyaTEQFdQDz8SpanModeTbZQBkMFKQBdKQBdKbZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy15refCountedStoreMNgFNaNbNcNdNiNfZNgSQFcQFb__TQEvTQEmVQDpi0ZQFj15RefCountedStore@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy17refCountedPayloadMNgFNaNbNcNdNiNjNfZNgSQFgQEcQEa@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy6__ctorMFNcQCzZSQEhQEg__TQEaTQDrVQCui0ZQEo@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy6__dtorMFZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy6__initZ@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy8opAssignMFQCzZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy8opAssignMFSQEdQEc__TQDwTQDnVQCqi0ZQEkZv@Base 12
+ _D3std8typecons__T10RefCountedTSQBe4file15DirIteratorImplVEQCfQCe24RefCountedAutoInitializei0ZQCy__T6__ctorTAyaTEQEhQDd8SpanModeTbZQBfMFNcKQBfKQBfKbZSQFsQFr__TQFlTQFcVQEfi0ZQFz@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg11__xopEqualsMxFKxSQCsQCr__TQClTQCiTQCmTQCqTQCuTQCyTQDcTQDgTQDkZQDvZb@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg6__ctorMFNaNbNcNiNfQBvQByQCbQCeQChQCkQCnQCqZSQDsQDr__TQDlTQDiTQDmTQDqTQDuTQDyTQEcTQEgTQEkZQEv@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg6__initZ@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg8__xopCmpMxFKxSQCoQCn__TQChTQCeTQCiTQCmTQCqTQCuTQCyTQDcTQDgZQDrZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg__T5opCmpTxSQClQCk__TQCeTQCbTQCfTQCjTQCnTQCrTQCvTQCzTQDdZQDoZQCgMxFNaNbNiNfxQCnZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg__T8opEqualsTxSQCoQCn__TQChTQCeTQCiTQCmTQCqTQCuTQCyTQDcTQDgZQDrZQCjMxFNaNbNiNfxQCnZb@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd11__xopEqualsMxFKxSQCpQCo__TQCiTQCfTQCjTQCnTQCrTQCvTQCzTQDdZQDoZb@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd6__ctorMFNaNbNcNiNfQBsQBvQByQCbQCeQChQCkZSQDmQDl__TQDfTQDcTQDgTQDkTQDoTQDsTQDwTQEaZQEl@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd6__initZ@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd8__xopCmpMxFKxSQClQCk__TQCeTQCbTQCfTQCjTQCnTQCrTQCvTQCzZQDkZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd__T5opCmpTxSQCiQCh__TQCbTQByTQCcTQCgTQCkTQCoTQCsTQCwZQDhZQCcMxFNaNbNiNfxQCjZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd__T8opEqualsTxSQClQCk__TQCeTQCbTQCfTQCjTQCnTQCrTQCvTQCzZQDkZQCfMxFNaNbNiNfxQCjZb@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr11__xopEqualsMxFKxSQCcQCb__TQBvTQBsTQBwTQCaZQClZb@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr6__ctorMFNaNbNcNiNfQBfQBiQBlZSQCnQCm__TQCgTQCdTQChTQClZQCw@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr6__initZ@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr8__xopCmpMxFKxSQByQBx__TQBrTQBoTQBsTQBwZQChZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr__T5opCmpTxSQBvQBu__TQBoTQBlTQBpTQBtZQCeZQBmMxFNaNbNiNfxQBtZi@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr__T8opAssignTSQBxQBw__TQBqTQBnTQBrTQBvZQCgZQBoMFNaNbNcNiNfKQBuZQBy@Base 12
+ _D3std8typecons__T5TupleTAyaTQeTQhZQr__T8opEqualsTxSQByQBx__TQBrTQBoTQBsTQBwZQChZQBpMxFNaNbNiNfxQBtZb@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg11__xopEqualsMxFKxSQCsQCr__TQClTQCiTQBtZQCxZb@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg6__ctorMFNaNbNcNiNfQBvQBfZSQDaQCz__TQCtTQCqTQCbZQDf@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg6__initZ@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg6toHashMxFNbNfZm@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg__T8opEqualsTxSQCoQCn__TQChTQCeTQBpZQCtZQBlMxFxQBhZb@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx11__xopEqualsMxFKxSQDjQDi__TQDcTQCzTQCkZQDoZb@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx6__ctorMFNaNbNcNiNfQCmQBwZSQDrQDq__TQDkTQDhTQCsZQDw@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx6__initZ@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx6toHashMxFNbNfZm@Base 12
+ _D3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx__T8opEqualsTxSQDfQDe__TQCyTQCvTQCgZQDkZQBlMxFxQBhZb@Base 12
+ _D3std8typecons__T5TupleTC8TypeInfoTPvZQv11__xopEqualsMxFKxSQCgQCf__TQBzTQBwTQBpZQClZb@Base 12
+ _D3std8typecons__T5TupleTC8TypeInfoTPvZQv6__ctorMFNaNbNcNiNfQBjQBbZSQCoQCn__TQChTQCeTQBxZQCt@Base 12
+ _D3std8typecons__T5TupleTC8TypeInfoTPvZQv6__initZ@Base 12
+ _D3std8typecons__T5TupleTC8TypeInfoTPvZQv6toHashMxFNbNfZm@Base 12
+ _D3std8typecons__T5TupleTC8TypeInfoTPvZQv__T8opEqualsTxSQCcQCb__TQBvTQBsTQBlZQChZQBlMxFNbNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb11__xopEqualsMxFKxSQCnQCm__TQCgTQCdTQBqZQCsZb@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb6__ctorMFNaNbNcNiNfQBqQBcZSQCvQCu__TQCoTQClTQByZQDa@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb6__initZ@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb8__xopCmpMxFKxSQCjQCi__TQCcTQBzTQBmZQCoZi@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb__T5opCmpTxSQCgQCf__TQBzTQBwTQBjZQClZQBiMxFNaNbNiNfxQBpZi@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb__T8opEqualsTxSQCjQCi__TQCcTQBzTQBmZQCoZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq11__xopEqualsMxFKxSQEcQEb__TQDvTQDsVQDfa6_736368656d61TQDfVQEca8_73657175656e6365ZQFxZb@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq12_Tuple_superMNgFNaNbNcNdNiNeZNgSQErQEq__TQEkTQEhTQDbZQEw@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq6__ctorMFNaNbNcNiNfQDfQByZSQEkQEj__TQEdTQEaVQDna6_736368656d61TQDnVQEka8_73657175656e6365ZQGf@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq6__initZ@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq8__xopCmpMxFKxSQDyQDx__TQDrTQDoVQDba6_736368656d61TQDbVQDya8_73657175656e6365ZQFtZi@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq__T5opCmpTxSQDvQDu__TQDoTQDlVQCya6_736368656d61TQCyVQDva8_73657175656e6365ZQFqZQCyMxFNaNbNiNfxQDfZi@Base 12
+ _D3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq__T8opEqualsTxSQDyQDx__TQDrTQDoVQDba6_736368656d61TQDbVQDya8_73657175656e6365ZQFtZQDbMxFNaNbNiNfxQDfZb@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh11__fieldDtorMFNaNbNiNeZv@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh11__xopEqualsMxFKxSQHtQHs__TQHmTQHjTQFsZQHyZb@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh15__fieldPostblitMFNaNbNiNlNeZv@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh6__ctorMFNaNbNcNiNfQGwQFeZSQIbQIa__TQHuTQHrTQGaZQIg@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3uni__T13InversionListTSQByQBb8GcPolicyZQBhTEQCtQBw__T16UnicodeSetParserTSQDw5regex8internal6parser__T6ParserTAyaTSQFlQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGh__T8opEqualsTxSQHpQHo__TQHiTQHfTQFoZQHuZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm11__xopEqualsMxFKxSQFyQFx__TQFrTQFoTQDvTQCuZQGhZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm6__ctorMFNaNbNcNiNfQFbQDhQCfZSQGjQGi__TQGcTQFzTQEgTQDfZQGs@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpTSQEaQDd__TQDcTAxaZQDkFQiZQCvZQEm__T8opEqualsTxSQFuQFt__TQFnTQFkTQDrTQCqZQGdZQBpMxFNaNbNiNfxQBtZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj11__xopEqualsMxFKxSQEvQEu__TQEoTQElTQCsZQFaZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj6__ctorMFNaNbNcNiNfQDyQCeZSQFdQFc__TQEwTQEtTQDaZQFi@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj__T8opBinaryVAyaa1_7eTSQEzQEy__TQEsTSQFnQEq__TQEpTAxaZQExFQiZQEiZQFzZQCoMFNaNbNiNfQCiZSQHlQHk__TQHeTQHbTQFiTQCuZQHu@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplTSQCv5range__T10OnlyResultTaZQpZQDj__T8opEqualsTxSQErQEq__TQEkTQEhTQCoZQEwZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe11__xopEqualsMxFKxSQDqQDp__TQDjTQDgZQDrZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe6__ctorMFNaNbNcNiNfQCtZSQDvQDu__TQDoTQDlZQDw@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe__T8opBinaryVAyaa1_7eTSQDuQDt__TQDnTSQEi5range__T10OnlyResultTaZQpZQEwZQCqMFNaNbNiNfQCkZSQGiQGh__TQGbTQFyTQCsZQGn@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe__T8opEqualsTxSQDmQDl__TQDfTQDcZQDnZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo11__xopEqualsMxFKxSQFaQEz__TQEtTQEqTQCwTQEyZQFjZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__ctorMFNaNbNcNiNfQEdQCiQEjZSQFlQFk__TQFeTQFbTQDhTQFjZQFu@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo__T8opEqualsTxSQEwQEv__TQEpTQEmTQCsTQEuZQFfZQBpMxFNaNbNiNfxQBtZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk11__xopEqualsMxFKxSQEwQEv__TQEpTQEmTQCsZQFbZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__ctorMFNaNbNcNiNfQDzQCeZSQFeQFd__TQExTQEuTQDaZQFj@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk__T8opBinaryVAyaa1_7eTSQFaQEz__TQEtTQEqZQFbZQBpMFNaNbNiNfQBjZSQGnQGm__TQGgTQGdTQEjTQGlZQGw@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk__T8opEqualsTxSQEsQEr__TQElTQEiTQCoZQExZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf11__xopEqualsMxFKxSQDrQDq__TQDkTQDhZQDsZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf6__ctorMFNaNbNcNiNfQCuZSQDwQDv__TQDpTQDmZQDx@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf__T8opBinaryVAyaa1_7eTSQDvQDu__TQDoTSQEj5range__T10OnlyResultTaZQpZQExZQCqMFNaNbNiNfQCkZSQGjQGi__TQGcTQFzTQCsZQGo@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf__T8opEqualsTxSQDnQDm__TQDgTQDdZQDoZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo11__xopEqualsMxFKxSQFaQEz__TQEtTQEqTQCwTQEyZQFjZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__ctorMFNaNbNcNiNfQEdQCiQEjZSQFlQFk__TQFeTQFbTQDhTQFjZQFu@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpTQDdZQDo__T8opEqualsTxSQEwQEv__TQEpTQEmTQCsTQEuZQFfZQBpMxFNaNbNiNfxQBtZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk11__xopEqualsMxFKxSQEwQEv__TQEpTQEmTQCsZQFbZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__ctorMFNaNbNcNiNfQDzQCeZSQFeQFd__TQExTQEuTQDaZQFj@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk__T8opBinaryVQCwa1_7eTSQFaQEz__TQEtTQEqZQFbZQBpMFNaNbNiNfQBjZSQGnQGm__TQGgTQGdTQEjTQGlZQGw@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplTSQCw5range__T10OnlyResultTaZQpZQDk__T8opEqualsTxSQEsQEr__TQElTQEiTQCoZQExZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf11__xopEqualsMxFKxSQDrQDq__TQDkTQDhZQDsZb@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf6__ctorMFNaNbNcNiNfQCuZSQDwQDv__TQDpTQDmZQDx@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf__T8opBinaryVQBra1_7eTSQDvQDu__TQDoTSQEj5range__T10OnlyResultTaZQpZQExZQCqMFNaNbNiNfQCkZSQGjQGi__TQGcTQFzTQCsZQGo@Base 12
+ _D3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf__T8opEqualsTxSQDnQDm__TQDgTQDdZQDoZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf11__xopEqualsMxFKxSQErQEq__TQEkTQEhZQEsZb@Base 12
+ _D3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf6__ctorMFNaNbNcNiNfQDuZSQEwQEv__TQEpTQEmZQEx@Base 12
+ _D3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy4conv__T7toCharsVii10TaVEQBz5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDf__T8opEqualsTxSQEnQEm__TQEgTQEdZQEoZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl11__xopEqualsMxFKxSQCxQCw__TQCqTQCnZQCyZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl6__ctorMFNaNbNcNiNfQCaZSQDcQDb__TQCvTQCsZQDd@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl__T8opEqualsTxSQCtQCs__TQCmTQCjZQCuZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy11__xopEqualsMxFKxSQGkQGj__TQGdTQGaTQEmZQGpZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy6__ctorMFNaNbNcNiNfQFnQDyZSQGsQGr__TQGlTQGiTQEuZQGx@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcTSQCq4conv__T7toCharsVii10TaVEQDs5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQEy__T8opEqualsTxSQGgQGf__TQFzTQFwTQEiZQGlZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz11__xopEqualsMxFKxSQDlQDk__TQDeTQDbZQDmZb@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz6__ctorMFNaNbNcNiNfQCoZSQDqQDp__TQDjTQDgZQDr@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz__T8opBinaryVAyaa1_7eTSQDpQDo__TQDiTSQEd4conv__T7toCharsVii10TaVEQFf5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQGlZQEkMFNaNbNiNfQEeZSQHxQHw__TQHqTQHnTQEmZQIc@Base 12
+ _D3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz__T8opEqualsTxSQDhQDg__TQDaTQCxZQDiZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj11__xopEqualsMxFKxSQGvQGu__TQGoTQGlZQGwZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj6__ctorMFNaNbNcNiNfQFyZSQHaQGz__TQGtTQGqZQHb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T12FilterResultSQCm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEj5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFj__T8opEqualsTxSQGrQGq__TQGkTQGhZQGsZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz11__xopEqualsMxFKxSQNlQNk__TQNeTQNbTQEsZQNqZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz6__ctorMFNaNbNcNiNfQMoQEeZSQNtQNs__TQNmTQNjTQFaZQNy@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyTSQJlQIoQIh__TQElSQKbQGyQGsQGmMxFNbNdZ9__lambda3TSQLhQEa__TQDxTmTxmZQEgFmxmZQDyZQGzZQLz__T8opEqualsTxSQNhQNg__TQNaTQMxTQEoZQNmZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu11__xopEqualsMxFKxSQKgQKf__TQJzTQJwZQKhZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu6__ctorMFNaNbNcNiNfQJjZSQKlQKk__TQKeTQKbZQKm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu__T8opBinaryVAyaa1_7eTSQKkQKj__TQKdTSQKyQKbQJu__TQFySQLoQIlQIfQHzMxFNbNdZ9__lambda3TSQMuQFn__TQFkTmTxmZQFtFmxmZQFlZQImZQNmZQEqMFNaNbNiNfQEkZSQOyQOx__TQOrTQOoTQEsZQPd@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T6joinerTSQCgQBjQBc__T9MapResultSQDd8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFaQEdQDw__T12FilterResultSQGbQCyQCsQCmMxFNbNdZ9__lambda1TSQHh5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIu__T8opEqualsTxSQKcQKb__TQJvTQJsZQKdZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt11__xopEqualsMxFKxSQJfQJe__TQIyTQIvTQEbZQJkZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt6__ctorMFNaNbNcNiNfQIiQDnZSQJnQJm__TQJgTQJdTQEjZQJs@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuTSQFwQEzQEs__TQElSQGmQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHt__T8opEqualsTxSQJbQJa__TQIuTQIrTQDxZQJgZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf11__xopEqualsMxFKxSQGrQGq__TQGkTQGhZQGsZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__ctorMFNaNbNcNiNfQFuZSQGwQGv__TQGpTQGmZQGx@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf__T8opBinaryVQDba1_7eTSQGvQGu__TQGoTSQHjQGmQGf__TQFySQHzQFr__TQFiVQFca4_615b315dVQFra1_61ZQGkTQFdZQHvZQJgZQDzMFNaNbNiNfQDtZSQKsQKr__TQKlTQKiTQEbZQKx@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf__T8opEqualsTxSQGnQGm__TQGgTQGdZQGoZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf11__xopEqualsMxFKxSQGrQGq__TQGkTQGhZQGsZb@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__ctorMFNaNbNcNiNfQFuZSQGwQGv__TQGpTQGmZQGx@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6__initZ@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTSQy9algorithm9iteration__T9MapResultSQCi10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEm3uni21DecompressedIntervalsZQDuZQFf__T8opEqualsTxSQGnQGm__TQGgTQGdZQGoZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTaTaZQl11__xopEqualsMxFKxSQBwQBv__TQBpTaTaZQBxZb@Base 12
+ _D3std8typecons__T5TupleTaTaZQl6__ctorMFNaNbNcNiNfaaZSQCaQBz__TQBtTaTaZQCb@Base 12
+ _D3std8typecons__T5TupleTaTaZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTaTaZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTaTaZQl8__xopCmpMxFKxSQBsQBr__TQBlTaTaZQBtZi@Base 12
+ _D3std8typecons__T5TupleTaTaZQl__T5opCmpTxSQBpQBo__TQBiTaTaZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTaTaZQl__T8opEqualsTxSQBsQBr__TQBlTaTaZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTbTiZQl11__xopEqualsMxFKxSQBwQBv__TQBpTbTiZQBxZb@Base 12
+ _D3std8typecons__T5TupleTbTiZQl6__ctorMFNaNbNcNiNfbiZSQCaQBz__TQBtTbTiZQCb@Base 12
+ _D3std8typecons__T5TupleTbTiZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTbTiZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTbTiZQl8__xopCmpMxFKxSQBsQBr__TQBlTbTiZQBtZi@Base 12
+ _D3std8typecons__T5TupleTbTiZQl__T5opCmpTxSQBpQBo__TQBiTbTiZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTbTiZQl__T8opEqualsTxSQBsQBr__TQBlTbTiZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTbTkZQl11__xopEqualsMxFKxSQBwQBv__TQBpTbTkZQBxZb@Base 12
+ _D3std8typecons__T5TupleTbTkZQl6__ctorMFNaNbNcNiNfbkZSQCaQBz__TQBtTbTkZQCb@Base 12
+ _D3std8typecons__T5TupleTbTkZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTbTkZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTbTkZQl8__xopCmpMxFKxSQBsQBr__TQBlTbTkZQBtZi@Base 12
+ _D3std8typecons__T5TupleTbTkZQl__T5opCmpTxSQBpQBo__TQBiTbTkZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTbTkZQl__T8opEqualsTxSQBsQBr__TQBlTbTkZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg11__xopEqualsMxFKxSQDsQDr__TQDlTbVQDia10_7465726d696e61746564TiVQEma6_737461747573ZQFoZb@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg12_Tuple_superMNgFNaNbNcNdNiNeZNgSQEhQEg__TQEaTbTiZQEi@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg6__ctorMFNaNbNcNiNfbiZSQDwQDv__TQDpTbVQDma10_7465726d696e61746564TiVQEqa6_737461747573ZQFs@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg6__initZ@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg8__xopCmpMxFKxSQDoQDn__TQDhTbVQDea10_7465726d696e61746564TiVQEia6_737461747573ZQFkZi@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg__T5opCmpTxSQDlQDk__TQDeTbVQDba10_7465726d696e61746564TiVQEfa6_737461747573ZQFhZQCzMxFNaNbNiNfxQDgZi@Base 12
+ _D3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg__T8opEqualsTxSQDoQDn__TQDhTbVQDea10_7465726d696e61746564TiVQEia6_737461747573ZQFkZQDcMxFNaNbNiNfxQDgZb@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp11__xopEqualsMxFKxSQCaQBz__TQBtTeTeTeTeZQCfZb@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp6__ctorMFNaNbNcNiNfeeeeZSQCgQCf__TQBzTeTeTeTeZQCl@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp6__initZ@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp__T5opCmpTxSQBtQBs__TQBmTeTeTeTeZQByZQBiMxFNaNbNiNfxQBpZf@Base 12
+ _D3std8typecons__T5TupleTeTeTeTeZQp__T8opEqualsTxSQBwQBv__TQBpTeTeTeTeZQCbZQBlMxFNaNbNiNfxQBpZb@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn11__xopEqualsMxFKxSQByQBx__TQBrTiTQBoZQCbZb@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn6__ctorMFNaNbNcNiNfiQBaZSQCeQCd__TQBxTiTQBuZQCh@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn6__initZ@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn8__xopCmpMxFKxSQBuQBt__TQBnTiTQBkZQBxZi@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn__T5opCmpTxSQBrQBq__TQBkTiTQBhZQBuZQBgMxFNaNbNiNfxQBnZi@Base 12
+ _D3std8typecons__T5TupleTiTAyaZQn__T8opEqualsTxSQBuQBt__TQBnTiTQBkZQBxZQBjMxFNaNbNiNfxQBnZb@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx11__xopEqualsMxFKxSQDjQDi__TQDcTiVQCza6_737461747573TQDsVQDwa6_6f7574707574ZQEyZb@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx12_Tuple_superMNgFNaNbNcNdNiNeZNgSQDyQDx__TQDrTiTQDoZQEb@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx6__ctorMFNaNbNcNiNfiQClZSQDpQDo__TQDiTiVQDfa6_737461747573TQDyVQEca6_6f7574707574ZQFe@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx6__initZ@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx8__xopCmpMxFKxSQDfQDe__TQCyTiVQCva6_737461747573TQDoVQDsa6_6f7574707574ZQEuZi@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx__T5opCmpTxSQDcQDb__TQCvTiVQCsa6_737461747573TQDlVQDpa6_6f7574707574ZQErZQCsMxFNaNbNiNfxQCzZi@Base 12
+ _D3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx__T8opEqualsTxSQDfQDe__TQCyTiVQCva6_737461747573TQDoVQDsa6_6f7574707574ZQEuZQCvMxFNaNbNiNfxQCzZb@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn11__xopEqualsMxFKxSQByQBx__TQBrTkTkTkZQCbZb@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn6__ctorMFNaNbNcNiNfkkkZSQCdQCc__TQBwTkTkTkZQCg@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn8__xopCmpMxFKxSQBuQBt__TQBnTkTkTkZQBxZi@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn__T5opCmpTxSQBrQBq__TQBkTkTkTkZQBuZQBgMxFNaNbNiNfxQBnZi@Base 12
+ _D3std8typecons__T5TupleTkTkTkZQn__T8opEqualsTxSQBuQBt__TQBnTkTkTkZQBxZQBjMxFNaNbNiNfxQBnZb@Base 12
+ _D3std8typecons__T5TupleTkTmZQl11__xopEqualsMxFKxSQBwQBv__TQBpTkTmZQBxZb@Base 12
+ _D3std8typecons__T5TupleTkTmZQl6__ctorMFNaNbNcNiNfkmZSQCaQBz__TQBtTkTmZQCb@Base 12
+ _D3std8typecons__T5TupleTkTmZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTkTmZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTkTmZQl8__xopCmpMxFKxSQBsQBr__TQBlTkTmZQBtZi@Base 12
+ _D3std8typecons__T5TupleTkTmZQl__T5opCmpTxSQBpQBo__TQBiTkTmZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTkTmZQl__T8opEqualsTxSQBsQBr__TQBlTkTmZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq11__xopEqualsMxFKxSQDcQDb__TQCvTkVQCsa4_64617461TmVQDja5_636f756e74ZQEjZb@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq12_Tuple_superMNgFNaNbNcNdNiNeZNgSQDrQDq__TQDkTkTmZQDs@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq6__ctorMFNaNbNcNiNfkmZSQDgQDf__TQCzTkVQCwa4_64617461TmVQDna5_636f756e74ZQEn@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq6__initZ@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq8__xopCmpMxFKxSQCyQCx__TQCrTkVQCoa4_64617461TmVQDfa5_636f756e74ZQEfZi@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq__T5opCmpTxSQCvQCu__TQCoTkVQCla4_64617461TmVQDca5_636f756e74ZQEcZQCkMxFNaNbNiNfxQCrZi@Base 12
+ _D3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq__T8opEqualsTxSQCyQCx__TQCrTkVQCoa4_64617461TmVQDfa5_636f756e74ZQEfZQCnMxFNaNbNiNfxQCrZb@Base 12
+ _D3std8typecons__T5TupleTmTmZQl11__xopEqualsMxFKxSQBwQBv__TQBpTmTmZQBxZb@Base 12
+ _D3std8typecons__T5TupleTmTmZQl6__ctorMFNaNbNcNiNfmmZSQCaQBz__TQBtTmTmZQCb@Base 12
+ _D3std8typecons__T5TupleTmTmZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTmTmZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTmTmZQl8__xopCmpMxFKxSQBsQBr__TQBlTmTmZQBtZi@Base 12
+ _D3std8typecons__T5TupleTmTmZQl__T5opCmpTxSQBpQBo__TQBiTmTmZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTmTmZQl__T8opEqualsTxSQBsQBr__TQBlTmTmZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk11__xopEqualsMxFKxSQCwQCv__TQCpTmVQCma3_706f73TmVQDba3_6c656eZQDxZb@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk12_Tuple_superMNgFNaNbNcNdNiNeZNgSQDlQDk__TQDeTmTmZQDm@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk6__ctorMFNaNbNcNiNfmmZSQDaQCz__TQCtTmVQCqa3_706f73TmVQDfa3_6c656eZQEb@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk6__initZ@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk8__xopCmpMxFKxSQCsQCr__TQClTmVQCia3_706f73TmVQCxa3_6c656eZQDtZi@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk__T5opCmpTxSQCpQCo__TQCiTmVQCfa3_706f73TmVQCua3_6c656eZQDqZQCeMxFNaNbNiNfxQClZi@Base 12
+ _D3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk__T8opEqualsTxSQCsQCr__TQClTmVQCia3_706f73TmVQCxa3_6c656eZQDtZQChMxFNaNbNiNfxQClZb@Base 12
+ _D3std8typecons__T5TupleTuTaZQl11__xopEqualsMxFKxSQBwQBv__TQBpTuTaZQBxZb@Base 12
+ _D3std8typecons__T5TupleTuTaZQl6__ctorMFNaNbNcNiNfuaZSQCaQBz__TQBtTuTaZQCb@Base 12
+ _D3std8typecons__T5TupleTuTaZQl6__initZ@Base 12
+ _D3std8typecons__T5TupleTuTaZQl6toHashMxFNaNbNiNfZm@Base 12
+ _D3std8typecons__T5TupleTuTaZQl8__xopCmpMxFKxSQBsQBr__TQBlTuTaZQBtZi@Base 12
+ _D3std8typecons__T5TupleTuTaZQl__T5opCmpTxSQBpQBo__TQBiTuTaZQBqZQBeMxFNaNbNiNfxQBlZi@Base 12
+ _D3std8typecons__T5TupleTuTaZQl__T8opEqualsTxSQBsQBr__TQBlTuTaZQBtZQBhMxFNaNbNiNfxQBlZb@Base 12
+ _D3std8typecons__T5tupleVAyaa4_64617461VQpa5_636f756e74Z__TQBpTkTmZQBxFNaNbNiNfkmZSQDdQDc__T5TupleTkVQCya4_64617461TmVQDpa5_636f756e74ZQBr@Base 12
+ _D3std8typecons__T5tupleZ__TQkTC15TypeInfo_StructTPG32hZQBmFNaNbNiNfQBlQvZSQCvQCu__T5TupleTQCiTQBtZQp@Base 12
+ _D3std8typecons__T5tupleZ__TQkTC15TypeInfo_StructTPSQBy11concurrency3TidZQCdFNaNbNiNfQCcQBmZSQDnQDm__T5TupleTQDaTQClZQp@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe3uni__T13InversionListTSQCfQBb8GcPolicyZQBhTEQDaQBw__T16UnicodeSetParserTSQEd5regex8internal6parser__T6ParserTAyaTSQFsQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQGoFNaNbNiNfQGnQEuZSQHyQHx__T5TupleTQHlTQFtZQp@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQClFNaNbNiNfQCkZSQDsQDr__T5TupleTQDfZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCmFNaNbNiNfQClZSQDtQDs__T5TupleTQDgZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCmFNaNbNiNfQClZSQDtQDs__T5TupleTQDgZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe4conv__T7toCharsVii10TaVEQCg5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDmFNaNbNiNfQDlZSQEtQEs__T5TupleTQEgZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe5range__T10OnlyResultTaZQpZQBsFNaNbNiNfQBrZSQCzQCy__T5TupleTQCmZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe5range__T4TakeTSQBxQt__T6RepeatTaZQkZQBcZQCgFNaNbNiNfQCfZSQDnQDm__T5TupleTQDaZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe9algorithm9iteration__T12FilterResultSQCt8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQEq5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFqFNaNbNiNfQFpZSQGxQGw__T5TupleTQGkZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe9algorithm9iteration__T6joinerTSQCnQBjQBc__T9MapResultSQDk8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQFhQEdQDw__T12FilterResultSQGiQCyQCsQCmMxFNbNdZ9__lambda1TSQHo5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQJbFNaNbNiNfQJaZSQKiQKh__T5TupleTQJvZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe9algorithm9iteration__T9MapResultSQCp10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQEt3uni21DecompressedIntervalsZQDuZQFmFNaNbNiNfQFlZSQGtQGs__T5TupleTQGgZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTSQBe9algorithm9iteration__T9MapResultSQCp10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQEt3uni21DecompressedIntervalsZQDuZQFmFNaNbNiNfQFlZSQGtQGs__T5TupleTQGgZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTbTkZQrFNaNbNiNfbkZSQBwQBv__T5TupleTbTkZQl@Base 12
+ _D3std8typecons__T5tupleZ__TQkTkTkTkZQtFNaNbNiNfkkkZSQBzQBy__T5TupleTkTkTkZQn@Base 12
+ _D3std8typecons__T5tupleZ__TQkTuTaZQrFNaNbNiNfuaZSQBwQBv__T5TupleTuTaZQl@Base 12
+ _D3std9algorithm10comparison11__moduleRefZ@Base 12
+ _D3std9algorithm10comparison12__ModuleInfoZ@Base 12
+ _D3std9algorithm10comparison6EditOp6__initZ@Base 12
+ _D3std9algorithm10comparison__T3cmpTAxhTQeZQmFNaNbNiNfQsQuZi@Base 12
+ _D3std9algorithm10comparison__T3cmpTSQBjQBi9iteration__T12FilterResultS_DQCt3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQFkQFjQEb__T9MapResultSQGh5ascii7toLowerTQCoZQBhZQFfTQGdZQGmFNaNfQGpQGsZi@Base 12
+ _D3std9algorithm10comparison__T3maxTiTmZQjFNaNbNiNfimZm@Base 12
+ _D3std9algorithm10comparison__T3maxTkTkZQjFNaNbNiNfkkZk@Base 12
+ _D3std9algorithm10comparison__T3maxTmTiZQjFNaNbNiNfmiZm@Base 12
+ _D3std9algorithm10comparison__T3maxTmTmZQjFNaNbNiNfmmZm@Base 12
+ _D3std9algorithm10comparison__T3minTPvTQdZQlFNaNbNiNfQrQtZQw@Base 12
+ _D3std9algorithm10comparison__T3minTkTkZQjFNaNbNiNfkkZk@Base 12
+ _D3std9algorithm10comparison__T3minTlTmZQjFNaNbNiNflmZl@Base 12
+ _D3std9algorithm10comparison__T3minTmTiZQjFNaNbNiNfmiZi@Base 12
+ _D3std9algorithm10comparison__T3minTmTmZQjFNaNbNiNfmmZm@Base 12
+ _D3std9algorithm10comparison__T3minTmTyiZQkFNaNbNiNfmyiZyi@Base 12
+ _D3std9algorithm10comparison__T3minTmTymZQkFNaNbNiNfmymZm@Base 12
+ _D3std9algorithm10comparison__T3minTyiTmZQkFNaNbNiNfyimZyi@Base 12
+ _D3std9algorithm10comparison__T3minTymTmZQkFNaNbNiNfymmZym@Base 12
+ _D3std9algorithm10comparison__T3minTymTymZQlFNaNbNiNfymymZym@Base 12
+ _D3std9algorithm10comparison__T5amongSQBkQBjQBc10__lambda82TAyaTQeTQhTQkZQBqFNaNbNiNfQzQBbQBeQBhZk@Base 12
+ _D3std9algorithm10comparison__T5amongSQBkQBjQBc10__lambda82TEQCh6format8internal5write17HasToStringResultTQBuTQByTQCcZQDjFNaNbNiNfQCsQCvQCyQDbZk@Base 12
+ _D3std9algorithm10comparison__T5amongVai105Vai73Z__TQvTyaZQBbFNaNbNiNfyaZk@Base 12
+ _D3std9algorithm10comparison__T5amongVai108Vai76Vai102Vai70Vai105Vai73Z__TQBrTyaZQByFNaNbNiNfyaZk@Base 12
+ _D3std9algorithm10comparison__T5amongVai117Vai108Vai85Vai76Z__TQBgTyaZQBnFNaNbNiNfyaZk@Base 12
+ _D3std9algorithm10comparison__T5amongVai95Vai44Z__TQuTyaZQBaFNaNbNiNfyaZk@Base 12
+ _D3std9algorithm10comparison__T5equalZ__T9equalLoopTSQBzQBy9iteration__T9MapResultSQDd5ascii7toLowerTSQDw3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDcTSQFzQFyQEa__TQDtSQGpQDmQDjTSQHaQDe__TQDdTAyaZQDlFQiZQCvZQFkZQGtFNaNbNiNfKQGvKQCzZb@Base 12
+ _D3std9algorithm10comparison__T5equalZ__TQkTAxaTAyaZQvFNaNbNiNfQtQrZb@Base 12
+ _D3std9algorithm10comparison__T5equalZ__TQkTSQBrQBq9iteration__T9MapResultSQCv5ascii7toLowerTSQDo3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDcTSQFrQFqQEa__TQDtSQGhQDmQDjTSQGsQDe__TQDdTAyaZQDlFQiZQCvZQFkZQGvFNaNbNiNfQGuQCxZb@Base 12
+ _D3std9algorithm11__moduleRefZ@Base 12
+ _D3std9algorithm12__ModuleInfoZ@Base 12
+ _D3std9algorithm6setops11__moduleRefZ@Base 12
+ _D3std9algorithm6setops12__ModuleInfoZ@Base 12
+ _D3std9algorithm7sorting11__moduleRefZ@Base 12
+ _D3std9algorithm7sorting12__ModuleInfoZ@Base 12
+ _D3std9algorithm7sorting__T11TimSortImplSQBn3uni__T13InversionListTSQCoQBb8GcPolicyZQBh8sanitizeMFNfZ9__lambda2TSQEhQCu__TQCtTQChZQDb__T9IntervalsTAkZQoZ5Slice6__initZ@Base 12
+ _D3std9algorithm7sorting__T13quickSortImplSQBp10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFg8datetime8timezone13PosixTimeZone10LeapSecondZQGeFNaNbNiNfQClmZv@Base 12
+ _D3std9algorithm7sorting__T13quickSortImplSQBp10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFg8datetime8timezone13PosixTimeZone14TempTransitionZQGiFNaNbNiNfQCpmZv@Base 12
+ _D3std9algorithm7sorting__T13quickSortImplSQBp10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZQDjFNaNbNiNfQrmZv@Base 12
+ _D3std9algorithm7sorting__T18trustedMoveEmplaceTAyaZQzFNaNbNiNeKQqKQtZv@Base 12
+ _D3std9algorithm7sorting__T18trustedMoveEmplaceTCQBv3zip13ArchiveMemberZQBtFNaNbNiNeKQBlKQBpZv@Base 12
+ _D3std9algorithm7sorting__T18trustedMoveEmplaceTSQBv8datetime8timezone13PosixTimeZone10LeapSecondZQCtFNaNbNiNeKQClKQCpZv@Base 12
+ _D3std9algorithm7sorting__T18trustedMoveEmplaceTSQBv8datetime8timezone13PosixTimeZone14TempTransitionZQCxFNaNbNiNeKQCpKQCtZv@Base 12
+ _D3std9algorithm7sorting__T4sortVAyaa17_612e74696d6554203c20622e74696d6554VEQCwQCv8mutation12SwapStrategyi0TASQEe8datetime8timezone13PosixTimeZone10LeapSecondZQFcFNaNbNiNfQClZSQGs5range__T11SortedRangeTQDqVQGra17_612e74696d6554203c20622e74696d6554VEQJnQCv18SortedRangeOptionsi0ZQDm@Base 12
+ _D3std9algorithm7sorting__T4sortVAyaa17_612e74696d6554203c20622e74696d6554VEQCwQCv8mutation12SwapStrategyi0TASQEe8datetime8timezone13PosixTimeZone14TempTransitionZQFgFNaNbNiNfQCpZSQGw5range__T11SortedRangeTQDuVQGva17_612e74696d6554203c20622e74696d6554VEQJrQCv18SortedRangeOptionsi0ZQDm@Base 12
+ _D3std9algorithm7sorting__T4sortVAyaa5_61203c2062VEQBxQBw8mutation12SwapStrategyi0TAQBzZQCjFNaNbNiNfQrZSQDy5range__T11SortedRangeTQBvVQDxa5_61203c2062VEQFuQBw18SortedRangeOptionsi0ZQCn@Base 12
+ _D3std9algorithm7sorting__T5sort5SQBg10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEx8datetime8timezone13PosixTimeZone10LeapSecondZQFvFNaNbNiNfQClZv@Base 12
+ _D3std9algorithm7sorting__T5sort5SQBg10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEx8datetime8timezone13PosixTimeZone14TempTransitionZQFzFNaNbNiNfQCpZv@Base 12
+ _D3std9algorithm7sorting__T5sort5SQBg10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZQDaFNaNbNiNfQrZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone10LeapSecondZ__T6isHeapZQiFNaNbNiNfQCvZb@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone10LeapSecondZ__T8heapSortZQkFNaNbNiNfQCxZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone10LeapSecondZ__T8siftDownZQkFNaNbNiNfQCxmymZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone10LeapSecondZ__T9buildHeapZQlFNaNbNiNfQCyZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone10LeapSecondZ__T9percolateZQlFNaNbNiNfQCymymZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone14TempTransitionZ__T6isHeapZQiFNaNbNiNfQCzZb@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone14TempTransitionZ__T8heapSortZQkFNaNbNiNfQDbZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone14TempTransitionZ__T8siftDownZQkFNaNbNiNfQDbmymZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone14TempTransitionZ__T9buildHeapZQlFNaNbNiNfQDcZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQEz8datetime8timezone13PosixTimeZone14TempTransitionZ__T9percolateZQlFNaNbNiNfQDcmymZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZ__T6isHeapZQiFNaNbNiNfQBbZb@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZ__T8heapSortZQkFNaNbNiNfQBdZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZ__T8siftDownZQkFNaNbNiNfQBdmymZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZ__T9buildHeapZQlFNaNbNiNfQBeZv@Base 12
+ _D3std9algorithm7sorting__T7HeapOpsSQBi10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZ__T9percolateZQlFNaNbNiNfQBemymZv@Base 12
+ _D3std9algorithm7sorting__T8getPivotSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFa8datetime8timezone13PosixTimeZone10LeapSecondZQFyFNaNbNiNfQClZm@Base 12
+ _D3std9algorithm7sorting__T8getPivotSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFa8datetime8timezone13PosixTimeZone14TempTransitionZQGcFNaNbNiNfQCpZm@Base 12
+ _D3std9algorithm7sorting__T8getPivotSQBj10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZQDdFNaNbNiNfQrZm@Base 12
+ _D3std9algorithm7sorting__T8isSortedSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFa8datetime8timezone13PosixTimeZone10LeapSecondZQFyFNaNbNiNfQClZb@Base 12
+ _D3std9algorithm7sorting__T8isSortedSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFa8datetime8timezone13PosixTimeZone14TempTransitionZQGcFNaNbNiNfQCpZb@Base 12
+ _D3std9algorithm7sorting__T8isSortedSQBj10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZQDdFNaNbNiNfQrZb@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtVEQEz8typecons__T4FlagVQDia9_6c65616e5269676874ZQBfi0TASQHb8datetime8timezone13PosixTimeZone10LeapSecondTmTmTmTmTmZQIjFNaNbNiNfQCvmmmmmZv@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtVEQEz8typecons__T4FlagVQDia9_6c65616e5269676874ZQBfi0TASQHb8datetime8timezone13PosixTimeZone10LeapSecondTmTmTmZQIfFNaNbNiNfQCrmmmZv@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtVEQEz8typecons__T4FlagVQDia9_6c65616e5269676874ZQBfi0TASQHb8datetime8timezone13PosixTimeZone14TempTransitionTmTmTmTmTmZQInFNaNbNiNfQCzmmmmmZv@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtVEQEz8typecons__T4FlagVQDia9_6c65616e5269676874ZQBfi0TASQHb8datetime8timezone13PosixTimeZone14TempTransitionTmTmTmZQIjFNaNbNiNfQCvmmmZv@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsVEQDy8typecons__T4FlagVQCha9_6c65616e5269676874ZQBfi0TAQDnTmTmTmTmTmZQFoFNaNbNiNfQBbmmmmmZv@Base 12
+ _D3std9algorithm7sorting__T8medianOfSQBj10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsVEQDy8typecons__T4FlagVQCha9_6c65616e5269676874ZQBfi0TAQDnTmTmTmZQFkFNaNbNiNfQxmmmZv@Base 12
+ _D3std9algorithm7sorting__T9shortSortSQBk10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFb8datetime8timezone13PosixTimeZone10LeapSecondZQFzFNaNbNiNfQClZv@Base 12
+ _D3std9algorithm7sorting__T9shortSortSQBk10functional__T9binaryFunVAyaa17_612e74696d6554203c20622e74696d6554VQBqa1_61VQBza1_62ZQCtTASQFb8datetime8timezone13PosixTimeZone14TempTransitionZQGdFNaNbNiNfQCpZv@Base 12
+ _D3std9algorithm7sorting__T9shortSortSQBk10functional__T9binaryFunVAyaa5_61203c2062VQra1_61VQza1_62ZQBsTAQBmZQDeFNaNbNiNfQrZv@Base 12
+ _D3std9algorithm8internal11__moduleRefZ@Base 12
+ _D3std9algorithm8internal12__ModuleInfoZ@Base 12
+ _D3std9algorithm8mutation11__moduleRefZ@Base 12
+ _D3std9algorithm8mutation12__ModuleInfoZ@Base 12
+ _D3std9algorithm8mutation__T10removeImplVEQBoQBnQBg12SwapStrategyi0TAAyaTlZQBvFNaNbNiNfQtlZQx@Base 12
+ _D3std9algorithm8mutation__T10removeImplVEQBoQBnQBg12SwapStrategyi2TAC4core6thread5fiber5FiberTmZQCrFNaNbNiNfQBpmZQBu@Base 12
+ _D3std9algorithm8mutation__T11moveAllImplSQBoQBnQBg4moveTAC4core6thread5fiber5FiberTQBbZQCiFNaNbNiNfKQBsKQBwZQCa@Base 12
+ _D3std9algorithm8mutation__T11moveEmplaceTSQBp3net4curl3FTP4ImplZQBlFNaNbNiKQBiKQBmZv@Base 12
+ _D3std9algorithm8mutation__T11moveEmplaceTSQBp3net4curl4HTTP4ImplZQBmFNaNbNiKQBjKQBnZv@Base 12
+ _D3std9algorithm8mutation__T11moveEmplaceTSQBp3net4curl4SMTP4ImplZQBmFNaNbNiKQBjKQBnZv@Base 12
+ _D3std9algorithm8mutation__T11moveEmplaceTSQBp4file15DirIteratorImplZQBpFNaNbNiKQBmKQBqZv@Base 12
+ _D3std9algorithm8mutation__T12removeStableTAC4core6thread5fiber5FiberTmZQBsFNaNbNiNfQBpmZQBu@Base 12
+ _D3std9algorithm8mutation__T14removeUnstableTAAyaTlZQyFNaNbNiNfQslZQw@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTAAyaZQxFNaNbNiNfMKQsNkMKQyZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTACQBu3zip13ArchiveMemberZQBrFNaNbNiNfMKQBnNkMKQBuZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTASQBu5regex8internal2ir10NamedGroupZQCcFNaNbNiNfMKQByNkMKQCfZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTASQBu8datetime8timezone13PosixTimeZone10LeapSecondZQCrFNaNbNiNfMKQCnNkMKQCuZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTASQBu8datetime8timezone13PosixTimeZone14TempTransitionZQCvFNaNbNiNfMKQCrNkMKQCyZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTAkZQvFNaNbNiNfMKQqNkMKQwZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTAyaZQwFNaNbNiNfMKQrNkMKQxZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTC4core6thread5fiber5FiberZQBsFNaNbNiNfMKQBoNkMKQBvZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt3net4curl3FTP4ImplZQBpFNaNbNiNfMKQBlNkMKQBsZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt3net4curl4HTTP4ImplZQBqFNaNbNiNfMKQBmNkMKQBtZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt3net4curl4SMTP4ImplZQBqFNaNbNiNfMKQBmNkMKQBtZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt3uni17CodepointIntervalZQBuFNaNbNiNfKQBpKQBtZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt3uni__T13InversionListTSQCuQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQDhFNaNbNiNfMKQDdNkMKQDkZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBt4file15DirIteratorImplZQBtFNaNbNiNfMKQBpNkMKQBwZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBtQBs9iteration__T9MapResultSQCx10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQFf8internal14unicode_tables9CompEntryZQEgZQFwFNaNbNiNfMKQFsNkMKQFzZv@Base 12
+ _D3std9algorithm8mutation__T15moveEmplaceImplTSQBtQBs9iteration__T9MapResultSQCx10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQFh8internal14unicode_tables15UnicodePropertyZQEpZQGfFNaNbNiNfMKQGbNkMKQGiZv@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTAAyaZQxFNaNbNiNeNkMKQuZQx@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTACQBu3zip13ArchiveMemberZQBrFNaNbNiNeNkMKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTASQBu5regex8internal2ir10NamedGroupZQCcFNaNbNiNeNkMKQCaZQCe@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTASQBu8datetime8timezone13PosixTimeZone10LeapSecondZQCrFNaNbNiNeNkMKQCpZQCt@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTASQBu8datetime8timezone13PosixTimeZone14TempTransitionZQCvFNaNbNiNeNkMKQCtZQCx@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTAkZQvFNaNbNiNeNkMKQsZQv@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTSQBt3uni17CodepointIntervalZQBuFNaNbNiNeKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTSQBt3uni__T13InversionListTSQCuQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQDhFNaNbNiNeNkMKQDfZQDj@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTSQBtQBs9iteration__T9MapResultSQCx10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQFf8internal14unicode_tables9CompEntryZQEgZQFwFNaNbNiNeNkMKQFuZQFy@Base 12
+ _D3std9algorithm8mutation__T15trustedMoveImplTSQBtQBs9iteration__T9MapResultSQCx10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQFh8internal14unicode_tables15UnicodePropertyZQEpZQGfFNaNbNiNeNkMKQGdZQGh@Base 12
+ _D3std9algorithm8mutation__T4copyTASQBi5regex8internal2ir8BytecodeTQBhZQBrFNaNbNiNfQBxQCaZQCe@Base 12
+ _D3std9algorithm8mutation__T4copyTAiTAkZQmFNaNbNiNfQrQqZQt@Base 12
+ _D3std9algorithm8mutation__T4copyTAkTQdZQmFNaNbNiNfQrQtZQw@Base 12
+ _D3std9algorithm8mutation__T4copyTSQBh3uni__T13InversionListTSQCiQBb8GcPolicyZQBh__T9IntervalsTAkZQoTASQDxQCq17CodepointIntervalZQDxFNaNbNiNfQEdQBrZQBv@Base 12
+ _D3std9algorithm8mutation__T4copyTSQBh5range__T10roundRobinTSQChQCg9iteration__T9MapResultSQDl10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQFp3uni21DecompressedIntervalsZQDuTSQGzQGyQEs__TQElSQHpQEe__TQDvVQDpa4_615b315dVQEea1_61ZQExTQDqZQGiZQHtFQHkQCvZ6ResultTAkZQJiFNaNfQJkQoZQr@Base 12
+ _D3std9algorithm8mutation__T4moveTAAyaZQlFNaNbNiNfNkMKQuZQx@Base 12
+ _D3std9algorithm8mutation__T4moveTACQBi3zip13ArchiveMemberZQBfFNaNbNiNfNkMKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T4moveTASQBi5regex8internal2ir10NamedGroupZQBqFNaNbNiNfNkMKQCaZQCe@Base 12
+ _D3std9algorithm8mutation__T4moveTASQBi8datetime8timezone13PosixTimeZone10LeapSecondZQCfFNaNbNiNfNkMKQCpZQCt@Base 12
+ _D3std9algorithm8mutation__T4moveTASQBi8datetime8timezone13PosixTimeZone14TempTransitionZQCjFNaNbNiNfNkMKQCtZQCx@Base 12
+ _D3std9algorithm8mutation__T4moveTAkZQjFNaNbNiNfNkMKQsZQv@Base 12
+ _D3std9algorithm8mutation__T4moveTAyaZQkFNaNbNiNfKQqKQtZv@Base 12
+ _D3std9algorithm8mutation__T4moveTC4core6thread5fiber5FiberZQBgFNaNbNiNfKQBnKQBrZv@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh3net4curl3FTP4ImplZQBdFKQBcKQBgZv@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh3net4curl4HTTP4ImplZQBeFKQBdKQBhZv@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh3net4curl4SMTP4ImplZQBeFKQBdKQBhZv@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh3uni17CodepointIntervalZQBiFNaNbNiNfKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh3uni__T13InversionListTSQCiQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQCvFNaNbNiNfNkMKQDfZQDj@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBh4file15DirIteratorImplZQBhFKQBgKQBkZv@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBhQBg9iteration__T9MapResultSQCl10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEt8internal14unicode_tables9CompEntryZQEgZQFkFNaNbNiNfNkMKQFuZQFy@Base 12
+ _D3std9algorithm8mutation__T4moveTSQBhQBg9iteration__T9MapResultSQCl10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEv8internal14unicode_tables15UnicodePropertyZQEpZQFtFNaNbNiNfNkMKQGdZQGh@Base 12
+ _D3std9algorithm8mutation__T4swapTAAyaZQlFNaNbNiNeKQrKQuZv@Base 12
+ _D3std9algorithm8mutation__T4swapTACQBi3zip13ArchiveMemberZQBfFNaNbNiNeKQBmKQBqZv@Base 12
+ _D3std9algorithm8mutation__T4swapTASQBi8datetime8timezone13PosixTimeZone10LeapSecondZQCfFNaNbNiNeKQCmKQCqZv@Base 12
+ _D3std9algorithm8mutation__T4swapTASQBi8datetime8timezone13PosixTimeZone14TempTransitionZQCjFNaNbNiNeKQCqKQCuZv@Base 12
+ _D3std9algorithm8mutation__T4swapTAyaZQkFNaNbNiNeKQqKQtZv@Base 12
+ _D3std9algorithm8mutation__T4swapTCQBh3zip13ArchiveMemberZQBeFNaNbNiNeKQBlKQBpZv@Base 12
+ _D3std9algorithm8mutation__T4swapTPSQBi8typecons__T10RefCountedTSQCl3net4curl3FTP4ImplVEQDiQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCkZQEpFNaNbNiNeKQEwKQFaZv@Base 12
+ _D3std9algorithm8mutation__T4swapTPSQBi8typecons__T10RefCountedTSQCl3net4curl4HTTP4ImplVEQDjQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCkZQEqFNaNbNiNeKQExKQFbZv@Base 12
+ _D3std9algorithm8mutation__T4swapTPSQBi8typecons__T10RefCountedTSQCl3net4curl4SMTP4ImplVEQDjQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCkZQEqFNaNbNiNeKQExKQFbZv@Base 12
+ _D3std9algorithm8mutation__T4swapTPSQBi8typecons__T10RefCountedTSQCl4file15DirIteratorImplVEQDmQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4ImplZQEvFNaNbNiNeKQFcKQFgZv@Base 12
+ _D3std9algorithm8mutation__T4swapTSQBh5stdio17LockingTextReaderZQBkFNaNbNiNeKQBrKQBvZv@Base 12
+ _D3std9algorithm8mutation__T4swapTSQBh5stdio4FileZQwFNaNbNiNeKQBcKQBgZv@Base 12
+ _D3std9algorithm8mutation__T4swapTSQBh8datetime8timezone13PosixTimeZone10LeapSecondZQCeFNaNbNiNeKQClKQCpZv@Base 12
+ _D3std9algorithm8mutation__T4swapTSQBh8datetime8timezone13PosixTimeZone14TempTransitionZQCiFNaNbNiNeKQCpKQCtZv@Base 12
+ _D3std9algorithm8mutation__T4swapThZQiFNaNbNiNeKhKhZv@Base 12
+ _D3std9algorithm8mutation__T6removeVEQBjQBiQBb12SwapStrategyi0TAAyaTlZQBqFNaNbNiNfQtlZQx@Base 12
+ _D3std9algorithm8mutation__T6removeVEQBjQBiQBb12SwapStrategyi2TAC4core6thread5fiber5FiberTmZQCmFNaNbNiNfQBpmZQBu@Base 12
+ _D3std9algorithm8mutation__T6swapAtTAAyaZQnFNaNbNiNfKQrmmZv@Base 12
+ _D3std9algorithm8mutation__T6swapAtTACQBk3zip13ArchiveMemberZQBhFNaNbNiNfKQBmmmZv@Base 12
+ _D3std9algorithm8mutation__T6swapAtTASQBk8datetime8timezone13PosixTimeZone10LeapSecondZQChFNaNbNiNfKQCmmmZv@Base 12
+ _D3std9algorithm8mutation__T6swapAtTASQBk8datetime8timezone13PosixTimeZone14TempTransitionZQClFNaNbNiNfKQCqmmZv@Base 12
+ _D3std9algorithm8mutation__T6swapAtTAhZQlFNaNbNiNfKQpmmZv@Base 12
+ _D3std9algorithm8mutation__T6swapAtTSQBj3uni__T13InversionListTSQCkQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQCxFNaNbNiNfKQDcmmZv@Base 12
+ _D3std9algorithm8mutation__T7moveAllTAC4core6thread5fiber5FiberTQBbZQBoFNaNbNiNfQBrQBuZQBy@Base 12
+ _D3std9algorithm8mutation__T7reverseTAhZQmFNaNbNiNfQoZQr@Base 12
+ _D3std9algorithm8mutation__T7reverseTSQBk3uni__T13InversionListTSQClQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQCyFNaNbNiNfQDbZQDf@Base 12
+ _D3std9algorithm8mutation__T8moveImplTAAyaZQpFNaNbNiNfNkMKQuZQx@Base 12
+ _D3std9algorithm8mutation__T8moveImplTACQBm3zip13ArchiveMemberZQBjFNaNbNiNfNkMKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T8moveImplTASQBm5regex8internal2ir10NamedGroupZQBuFNaNbNiNfNkMKQCaZQCe@Base 12
+ _D3std9algorithm8mutation__T8moveImplTASQBm8datetime8timezone13PosixTimeZone10LeapSecondZQCjFNaNbNiNfNkMKQCpZQCt@Base 12
+ _D3std9algorithm8mutation__T8moveImplTASQBm8datetime8timezone13PosixTimeZone14TempTransitionZQCnFNaNbNiNfNkMKQCtZQCx@Base 12
+ _D3std9algorithm8mutation__T8moveImplTAkZQnFNaNbNiNfNkMKQsZQv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTAyaZQoFNaNbNiNfMKQrNkMKQxZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTC4core6thread5fiber5FiberZQBkFNaNbNiNfMKQBoNkMKQBvZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl3net4curl3FTP4ImplZQBhFMKQBdNkMKQBkZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl3net4curl4HTTP4ImplZQBiFMKQBeNkMKQBlZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl3net4curl4SMTP4ImplZQBiFMKQBeNkMKQBlZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl3uni17CodepointIntervalZQBmFNaNbNiNfKQBpZQBt@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl3uni__T13InversionListTSQCmQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQCzFNaNbNiNfNkMKQDfZQDj@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBl4file15DirIteratorImplZQBlFMKQBhNkMKQBoZv@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBlQBk9iteration__T9MapResultSQCp10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQEx8internal14unicode_tables9CompEntryZQEgZQFoFNaNbNiNfNkMKQFuZQFy@Base 12
+ _D3std9algorithm8mutation__T8moveImplTSQBlQBk9iteration__T9MapResultSQCp10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQEz8internal14unicode_tables15UnicodePropertyZQEpZQFxFNaNbNiNfNkMKQGdZQGh@Base 12
+ _D3std9algorithm9iteration11__moduleRefZ@Base 12
+ _D3std9algorithm9iteration12__ModuleInfoZ@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj11__xopEqualsMxFKxSQIgQIfQHy__TQHrSQIwQHi__TQGzVQGsa6_61203d3d2062VQHla1_61VQHua1_62ZQIoTQGtZQKbZb@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj4backMFNaNbNdNiNfZQFo@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj4saveMFNaNbNdNiNfZSQIgQIfQHy__TQHrSQIwQHi__TQGzVQGsa6_61203d3d2062VQHla1_61VQHua1_62ZQIoTQGtZQKb@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj5frontMFNaNbNdNiNfZQFp@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj6__ctorMFNaNbNcNiNfQEbZSQIlQIkQId__TQHwSQJbQHn__TQHeVQGxa6_61203d3d2062VQHqa1_61VQHza1_62ZQItTQGyZQKg@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj6__initZ@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj7opSliceMFNaNbNiNfZSQIhQIgQHz__TQHsSQIxQHj__TQHaVQGta6_61203d3d2062VQHma1_61VQHva1_62ZQIpTQGuZQKc@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj7popBackMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T10UniqResultSQBo10functional__T9binaryFunVAyaa6_61203d3d2062VQta1_61VQBba1_62ZQBvTSQEg5range__T11SortedRangeTAQCqVQCua5_61203c2062VEQGdQBx18SortedRangeOptionsi0ZQCoZQGj9__xtoHashFNbNeKxSQIfQIeQHx__TQHqSQIvQHh__TQGyVQGra6_61203d3d2062VQHka1_61VQHta1_62ZQInTQGsZQKaZm@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw4saveMFNaNbNdNiNfZSQFtQFsQFl__TQFeSQGjQEtQEnQEhMxFNbNdZQEcTQDvZQGk@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw5emptyMFNaNbNdNiZb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw5frontMFNaNbNdNiZm@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw5primeMFNaNbNiZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw6__ctorMFNaNbNcNiNfQChZSQFyQFxQFq__TQFjSQGoQEyQEsQEmMxFNbNdZQEhTQEaZQGp@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw6__ctorMFNaNbNcNiNfQChbZSQFzQFyQFr__TQFkSQGpQEzQEtQEnMxFNbNdZQEiTQEbZQGq@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw6__initZ@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw7opSliceMFNaNbNiNfZSQFuQFtQFm__TQFfSQGkQEuQEoQEiMxFNbNdZQEdTQDwZQGl@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDw8popFrontMFNaNbNiZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy4saveMFNaNbNdNiNfZSQFvQFuQFn__TQFgSQGlQEvQEpQEjMxFNbNdZQEeTQDxZQGm@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy5emptyMFNaNbNdNiZb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy5frontMFNaNbNdNiZm@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy5primeMFNaNbNiZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy6__ctorMFNaNbNcNiNfQCjZSQGaQFzQFs__TQFlSQGqQFaQEuQEoMxFNbNdZQEjTQEcZQGr@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy6__ctorMFNaNbNcNiNfQCjbZSQGbQGaQFt__TQFmSQGrQFbQEvQEpMxFNbNdZQEkTQEdZQGs@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy6__initZ@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy7opSliceMFNaNbNiNfZSQFwQFvQFo__TQFhSQGmQEwQEqQEkMxFNbNdZQEfTQDyZQGn@Base 12
+ _D3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDy8popFrontMFNaNbNiZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf11__xopEqualsMxFKxSQHcQHbQGu__TQGnS_DQHuQGc__TQGbTaTaZQGjFNaNfQFjQFmZQFkQFiTQEzZQIkZb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf4saveMFNaNbNdNiNfZSQHcQHbQGu__TQGnS_DQHuQGc__TQGbTaTaZQGjFNaNfQFjQFmZQFkQFiTQEzZQIk@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf5emptyMFNaNdNfZb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf5frontMFNaNdNfZw@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf5primeMFNaNfZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf6__ctorMFNaNbNcNiNfQCuZSQHhQHgQGz__TQGsS_DQHzQGh__TQGgTaTaZQGoFNaNfQFoQFrZQFpQFnTQFeZQIp@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf6__ctorMFNaNbNcNiNfQCubZSQHiQHhQHa__TQGtS_DQIaQGi__TQGhTaTaZQGpFNaNfQFpQFsZQFqQFoTQFfZQIq@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf6__initZ@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf7opSliceMFNaNbNiNfZSQHdQHcQGv__TQGoS_DQHvQGd__TQGcTaTaZQGkFNaNfQFkQFnZQFlQFjTQFaZQIl@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf8popFrontMFNaNfZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCoZQBhZQFf9__xtoHashFNbNeKxSQHbQHaQGt__TQGmS_DQHtQGb__TQGaTaTaZQGiFNaNfQFiQFlZQFjQFhTQEyZQIjZm@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh11__fieldDtorMFZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh15__fieldPostblitMFNaNbNiNlZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh5emptyMFNdNfZb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh5frontMFNdNfZQCe@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh5primeMFNfZv@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh6__ctorMFNcQBjZSQGbQGaQFt__TQFmS_DQGtQFbQEzFQEqQEtQErbZQEeMQEfTQDjZQGz@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh6__ctorMFNcQBjbZSQGcQGbQFu__TQFnS_DQGuQFcQFaFQErQEuQEsbZQEfMQEgTQDkZQHa@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh6__initZ@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh7opSliceMFNaNbNiZSQGdQGcQFv__TQFoS_DQGvQFdQFbFQEsQEvQEtbZQEgMQEhTQDlZQHb@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh8opAssignMFNcNjSQGbQGaQFt__TQFmS_DQGtQFbQEzFQEqQEtQErbZQEeMQEfTQDjZQGzZQCe@Base 12
+ _D3std9algorithm9iteration__T12FilterResultS_DQBs4file10dirEntriesFAyaQdEQCtQBb8SpanModebZ1fMFNaNbNfSQDvQCd8DirEntryZbTSQEoQCw11DirIteratorZQEh8popFrontMFNfZv@Base 12
+ _D3std9algorithm9iteration__T3mapSQBg5ascii7toLowerZ__TQBaTAxaZQBiFNaNbNiNfQqZSQCzQCyQCr__T9MapResultSQDwQCqQCnTQCbZQz@Base 12
+ _D3std9algorithm9iteration__T3mapSQBg5ascii7toLowerZ__TQBaTSQCg3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDdFNaNbNiNfQClZSQEvQEuQEn__T9MapResultSQFsQEmQEjTQDxZQz@Base 12
+ _D3std9algorithm9iteration__T3mapSQBg5ascii7toLowerZ__TQBaTSQCg3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDdFNaNbNiNfQClZSQEvQEuQEn__T9MapResultSQFsQEmQEjTQDxZQz@Base 12
+ _D3std9algorithm9iteration__T3mapVAyaa4_615b305dZ__TQxTSQCc3uni21DecompressedIntervalsZQCgFNaNbNiNfQBsZSQDyQDxQDq__T9MapResultSQEv10functional__T8unaryFunVQEra4_615b305dVQFga1_61ZQBiTQEyZQCt@Base 12
+ _D3std9algorithm9iteration__T3mapVAyaa4_615b315dZ__TQxTSQCc3uni21DecompressedIntervalsZQCgFNaNbNiNfQBsZSQDyQDxQDq__T9MapResultSQEv10functional__T8unaryFunVQEra4_615b315dVQFga1_61ZQBiTQEyZQCt@Base 12
+ _D3std9algorithm9iteration__T3mapVAyaa5_612e726873Z__TQzTAySQCg8internal14unicode_tables9CompEntryZQCsFNaNbNiNfQCcZSQEkQEjQEc__T9MapResultSQFh10functional__T8unaryFunVQFda5_612e726873VQFua1_61ZQBkTQFkZQCv@Base 12
+ _D3std9algorithm9iteration__T3mapVAyaa6_612e6e616d65Z__TQBbTAySQCj8internal14unicode_tables15UnicodePropertyZQDcFNaNbNiNfQCjZSQEuQEtQEm__T9MapResultSQFr10functional__T8unaryFunVQFna6_612e6e616d65VQGga1_61ZQBmTQFtZQCx@Base 12
+ _D3std9algorithm9iteration__T3sumTAkTkZQkFNaNbNiNfQqkZk@Base 12
+ _D3std9algorithm9iteration__T3sumTAkZQiFNaNbNiNfQoZk@Base 12
+ _D3std9algorithm9iteration__T4uniqVAyaa6_61203d3d2062TSQCb5range__T11SortedRangeTAQBvVQBza5_61203c2062VEQDyQBx18SortedRangeOptionsi0ZQCoZQEeFNaNbNiNfQDrZSQFwQFvQFo__T10UniqResultSQGv10functional__T9binaryFunVQGra6_61203d3d2062VQHka1_61VQHta1_62ZQBwTQHnZQDj@Base 12
+ _D3std9algorithm9iteration__T6filterS_DQBl3uni__T19comparePropertyNameTaTaZQBaFNaNfAxaQdZ4predFNaNbNiNfwZbZ__TQDdTSQEjQEiQEb__T9MapResultSQFg5ascii7toLowerTQCvZQBhZQFfFNaNbNiNfQCkZSQGxQGwQGp__T12FilterResultS_DQIaQGp__TQGoTaTaZQGwFNaNfQFwQFzZQFxQFvTQFfZQCi@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFNaNbNiQHeZSQIsQIrQIk__TQIdTQHzZQIlFQIhZQCh@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy14replaceCurrentMFNaNbNiNeSQJoQHh__T7BitsSetTmZQlZv@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy4saveMFNaNbNdNiNfZSQJgQJfQIy__TQIrTQInZQIzFQIvZQCv@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy5emptyMFNaNbNdNiZb@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy5frontMFNaNbNdNiZm@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy6__ctorMFNaNbNcNiNfQHxSQJkQHd__T7BitsSetTmZQlZSQKiQKhQKa__TQJtTQJpZQKbFQJxZQDx@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy6__ctorMFNaNbNcNiQHvZSQJjQJiQJb__TQIuTQIqZQJcFQIyZQCy@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy6__initZ@Base 12
+ _D3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQy8popFrontMFNaNbNiZv@Base 12
+ _D3std9algorithm9iteration__T6reduceVAyaa5_61202b2062Z__T10reduceImplVbi0TAkTkZQwFNaNbNiNfQqKkZk@Base 12
+ _D3std9algorithm9iteration__T6reduceVAyaa5_61202b2062Z__T13reducePreImplTAkTkZQvFNaNbNiNfQqKkZk@Base 12
+ _D3std9algorithm9iteration__T6reduceVAyaa5_61202b2062Z__TQBcTkTAkZQBlFNaNbNiNfkQqZk@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFNaNbNiNfQuQEbZSQFrQFqQFj__TQFcVQEwa6_61203d3d2062VQEwi0TQCqTQFzZQGnFQDcQGkZ6Result@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result11__xopEqualsMxFKxSQGiQGhQGa__TQFtVQFna6_61203d3d2062VQFni0TQDhTQGqZQHeFQDtQHbZQDiZb@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result15separatorLengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result17ensureFrontLengthMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result4saveMFNaNbNdNiNfZSQGiQGhQGa__TQFtVQFna6_61203d3d2062VQFni0TQDhTQGqZQHeFQDtQHbZQDi@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result5frontMFNaNbNdNiNfZQBs@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result6__ctorMFNaNbNcNiNfQBsQFaZSQGqQGpQGi__TQGbVQFva6_61203d3d2062VQFvi0TQDpTQGyZQHmFQEbQHjZQDq@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result6__initZ@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TAxaTQDjZQDxFQmQDtZ6Result9__xtoHashFNbNeKxSQGhQGgQFz__TQFsVQFma6_61203d3d2062VQFmi0TQDgTQGpZQHdFQDsQHaZQDhZm@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFNaNbNiNfQDzQEcZSQFsQFrQFk__TQFdVQExa6_61203d3d2062VQExi0TQFwTQGaZQGoFQGiQGlZ6Result@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result11__xopEqualsMxFKxSQGjQGiQGb__TQFuVQFoa6_61203d3d2062VQFoi0TQGnTQGrZQHfFQGzQHcZQDiZb@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result15separatorLengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result17ensureFrontLengthMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result4saveMFNaNbNdNiNfZSQGjQGiQGb__TQFuVQFoa6_61203d3d2062VQFoi0TQGnTQGrZQHfFQGzQHcZQDi@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result5frontMFNaNbNdNiNfZQEy@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result6__ctorMFNaNbNcNiNfQEyQFbZSQGrQGqQGj__TQGcVQFwa6_61203d3d2062VQFwi0TQGvTQGzZQHnFQHhQHkZQDq@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result6__initZ@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T8splitterVAyaa6_61203d3d2062VEQCf8typecons__T4FlagVQBpa14_6b656570536570617261746f7273ZQBqi0TQDfTQDjZQDxFQDrQDuZ6Result9__xtoHashFNbNeKxSQGiQGhQGa__TQFtVQFna6_61203d3d2062VQFni0TQGmTQGqZQHeFQGyQHbZQDhZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu11__xopEqualsMxFKxSQFrQFqQFj__TQFcSQGhQEv__TQEmVQEga4_615b305dVQEva1_61ZQFoTQEhZQGzZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu4saveMFNaNdNfZSQFnQFmQFf__TQEySQGdQEr__TQEiVQEca4_615b305dVQEra1_61ZQFkTQEdZQGv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu5emptyMFNaNdNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu5frontMFNaNdNfZk@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__ctorMFNaNbNcNiNfQCcZSQFwQFvQFo__TQFhSQGmQFa__TQErVQEla4_615b305dVQFaa1_61ZQFtTQEmZQHe@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu8popFrontMFNaNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu9__xtoHashFNbNeKxSQFqQFpQFi__TQFbSQGgQEu__TQElVQEfa4_615b305dVQEua1_61ZQFnTQEgZQGyZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu11__xopEqualsMxFKxSQFrQFqQFj__TQFcSQGhQEv__TQEmVQEga4_615b315dVQEva1_61ZQFoTQEhZQGzZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu4saveMFNaNdNfZSQFnQFmQFf__TQEySQGdQEr__TQEiVQEca4_615b315dVQEra1_61ZQFkTQEdZQGv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu5emptyMFNaNdNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu5frontMFNaNdNfZk@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__ctorMFNaNbNcNiNfQCcZSQFwQFvQFo__TQFhSQGmQFa__TQErVQEla4_615b315dVQFaa1_61ZQFtTQEmZQHe@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu8popFrontMFNaNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDu9__xtoHashFNbNeKxSQFqQFpQFi__TQFbSQGgQEu__TQElVQEfa4_615b315dVQEua1_61ZQFnTQEgZQGyZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg11__xopEqualsMxFKxSQGdQGcQFv__TQFoSQGtQFh__TQEyVQEsa5_612e726873VQFja1_61ZQGcTQEtZQHnZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg4saveMFNaNbNdNiNfZSQGdQGcQFv__TQFoSQGtQFh__TQEyVQEsa5_612e726873VQFja1_61ZQGcTQEtZQHn@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg5frontMFNaNbNdNiNfZyw@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg6__ctorMFNaNbNcNiNfQCmZSQGiQGhQGa__TQFtSQGyQFm__TQFdVQExa5_612e726873VQFoa1_61ZQGhTQEyZQHs@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg7opIndexMFNaNbNiNfmZyw@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg7opSliceMFNaNbNiNfmmZSQGgQGfQFy__TQFrSQGwQFk__TQFbVQEva5_612e726873VQFma1_61ZQGfTQEwZQHq@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg9__mixin116lengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg9__xtoHashFNbNeKxSQGcQGbQFu__TQFnSQGsQFg__TQExVQEra5_612e726873VQFia1_61ZQGbTQEsZQHmZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg__T4backZQgMFNaNbNdNiNfZyw@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa5_612e726873VQra1_61ZQBjTAySQDu8internal14unicode_tables9CompEntryZQEg__T7popBackZQjMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp11__xopEqualsMxFKxSQGmQGlQGe__TQFxSQHcQFq__TQFhVQFba6_612e6e616d65VQFua1_61ZQGnTQFcZQHyZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp4saveMFNaNbNdNiNfZSQGmQGlQGe__TQFxSQHcQFq__TQFhVQFba6_612e6e616d65VQFua1_61ZQGnTQFcZQHy@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp5frontMFNaNbNdNiNfZyAa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp6__ctorMFNaNbNcNiNfQCtZSQGrQGqQGj__TQGcSQHhQFv__TQFmVQFga6_612e6e616d65VQFza1_61ZQGsTQFhZQId@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp7opIndexMFNaNbNiNfmZyAa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp7opSliceMFNaNbNiNfmmZSQGpQGoQGh__TQGaSQHfQFt__TQFkVQFea6_612e6e616d65VQFxa1_61ZQGqTQFfZQIb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp9__mixin116lengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp9__xtoHashFNbNeKxSQGlQGkQGd__TQFwSQHbQFp__TQFgVQFaa6_612e6e616d65VQFta1_61ZQGmTQFbZQHxZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp__T4backZQgMFNaNbNdNiNfZyAa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa6_612e6e616d65VQta1_61ZQBlTAySQDw8internal14unicode_tables15UnicodePropertyZQEp__T7popBackZQjMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh11__xopEqualsMxFKxSQDeQDdQCw__TQCpSQDuQCiQCfTQCaZQDhZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh4saveMFNaNbNdNiNfZSQDeQDdQCw__TQCpSQDuQCiQCfTQCaZQDh@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh5frontMFNaNdNfZw@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh6__ctorMFNaNbNcNiNfQBaZSQDjQDiQDb__TQCuSQDzQCnQCkTQCfZQDm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh9__xtoHashFNbNeKxSQDdQDcQCv__TQCoSQDtQChQCeTQBzZQDgZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc11__xopEqualsMxFKxSQEzQEyQEr__TQEkSQFpQEdQEaTQDvZQFcZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc4saveMFNaNbNdNiNfZSQEzQEyQEr__TQEkSQFpQEdQEaTQDvZQFc@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc5frontMFNaNbNdNiNfZa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc6__ctorMFNaNbNcNiNfQCvZSQFeQFdQEw__TQEpSQFuQEiQEfTQEaZQFh@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc7opSliceMFNaNbNiNfmmZSQFcQFbQEu__TQEnSQFsQEgQEdTQDyZQFf@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc9__mixin116lengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQDc9__xtoHashFNbNeKxSQEyQExQEq__TQEjSQFoQEcQDzTQDuZQFbZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc11__xopEqualsMxFKxSQEzQEyQEr__TQEkSQFpQEdQEaTQDvZQFcZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc4saveMFNaNbNdNiNfZSQEzQEyQEr__TQEkSQFpQEdQEaTQDvZQFc@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc5emptyMFNaNbNdNiNfZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc5frontMFNaNbNdNiNfZa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc6__ctorMFNaNbNcNiNfQCvZSQFeQFdQEw__TQEpSQFuQEiQEfTQEaZQFh@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc7opIndexMFNaNbNiNfmZa@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc7opSliceMFNaNbNiNfmmZSQFcQFbQEu__TQEnSQFsQEgQEdTQDyZQFf@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc8popFrontMFNaNbNiNfZv@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc9__mixin116lengthMFNaNbNdNiNfZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTSQCf3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQDc9__xtoHashFNbNeKxSQEyQExQEq__TQEjSQFoQEcQDzTQDuZQFbZm@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd4saveMFNaNbNdNiNfZSQIaQHzQHs__TQHlSQIqQHeQGyQGsMxFNbNdZQGnTQGgZQIr@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd5emptyMFNaNbNdNiZb@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd5frontMFNaNbNdNiZSQHzQGn__T7BitsSetTmZQl@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd6__ctorMFNaNbNcNiNfQEsZSQIfQIeQHx__TQHqSQIvQHjQHdQGxMxFNbNdZQGsTQGlZQIw@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd6__initZ@Base 12
+ _D3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGd8popFrontMFNaNbNiZv@Base 12
+ _D3std9algorithm9searching11__moduleRefZ@Base 12
+ _D3std9algorithm9searching12__ModuleInfoZ@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa11_615b305d203e2030783830TAxSQCv3uni17CodepointIntervalZQCvFNaNbNiNfQBqZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa11_62203c20612e74696d6554TAySQCv8datetime8timezone13PosixTimeZone10LeapSecondTylZQDuFNaNbNiNfQCpylZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa11_62203c20612e74696d6554TAySQCv8datetime8timezone13PosixTimeZone10TransitionTlZQDtFNaNbNiNfQColZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa11_62203c20612e74696d6554TAySQCv8datetime8timezone13PosixTimeZone10TransitionTylZQDuFNaNbNiNfQCpylZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TAQuTQxZQBnFNaNbNiNfQtQBoZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TASQCj3uni__T13InversionListTSQDkQBb8GcPolicyZQBhTQBwZQDhFNaNbNiNfQCnQCpZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TAaTaZQBlFNaNiNfQpaZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TAkTkZQBlFNaNbNiNfQrkZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TAyAaTQyZQBoFNaNbNiNfQuQBpZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TAyhTAxhZQBoFNaNbNiNfQuQsZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjTQDnTQDrTQDvZQEmFNaNfQDoQEkQEnQEqQEtQEwQEzQFcQFfZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjTQDnTQDrZQEiFNaNfQDkQEgQEjQEmQEpQEsQEvQEyZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTaTaZQDkFNaNbNiNfQCqaaZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTaZQDiFNaNbNiNfQCoaZl@Base 12
+ _D3std9algorithm9searching__T10countUntilVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTyaZQDjFNaNbNiNfQCpyaZl@Base 12
+ _D3std9algorithm9searching__T10startsWithSQBoQBnQBg11__lambda171TAxaTAyaTQeTQhZQByFNaNfQwQuQwQyZk@Base 12
+ _D3std9algorithm9searching__T10startsWithSQBoQBnQBg11__lambda171TAxaTAyaTQeZQBvFNaNfQtQrQtZk@Base 12
+ _D3std9algorithm9searching__T10startsWithSQBoQBnQBg11__lambda171TAxaTAyaZQBsFNaNfQqQoZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TAxaTQxZQBnFNaNbNiNfQtQBoZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TAxaTaZQBmFNaNbNiNfQsaZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TAyhTQxZQBnFNaNfQpQBkZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TQtTQwZQBmFNaNbNiNfQBlQBoZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjTQDnTQDrTQDvZQEmFNaNfQDoQEkQEnQEqQEtQEwQEzQFcQFfZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjTQDnTQDrZQEiFNaNfQDkQEgQEjQEmQEpQEsQEvQEyZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjTQDnZQEeFNaNfQDgQEcQEfQEiQElQEoQErZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfTQDjZQEaFNaNfQDcQDyQEbQEeQEhQEkZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbTQDfZQDwFNaNfQCyQDuQDxQEaQEdZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxTQDbZQDsFNaNfQCuQDqQDtQDwZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtTQCxZQDoFNaNfQCqQDmQDpZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTQCtZQDkFNaNfQCmQDiZb@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTaTaZQDkFNaNbNiNfQCqaaZk@Base 12
+ _D3std9algorithm9searching__T10startsWithVAyaa6_61203d3d2062TSQCi3utf__T10byCodeUnitTQBrZQrFQByZ14ByCodeUnitImplTaZQDiFNaNbNiNfQCoaZb@Base 12
+ _D3std9algorithm9searching__T14balancedParensTAxaTaZQxFNaNbNiNfQraamZb@Base 12
+ _D3std9algorithm9searching__T3anyS_DQBi4path14isDirSeparatorFNaNbNiNfwZbZ__TQBvTAxaZQCdFNaNfQmZb@Base 12
+ _D3std9algorithm9searching__T3anyVAyaa6_6120213d2030Z__TQBbTAkZQBiFNaNbNiNfQpZb@Base 12
+ _D3std9algorithm9searching__T3anyVAyaa6_6120213d2030Z__TQBbTAxkZQBjFNaNbNiNfQqZb@Base 12
+ _D3std9algorithm9searching__T4findS_DQBj4path14isDirSeparatorFNaNbNiNfwZbTAxaZQBxFNaNfQmZQp@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_6120213d2030TAkZQBcFNaNbNiNfQpZQs@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_6120213d2030TAxkZQBdFNaNbNiNfQqZQt@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAaTQdZQBfFNaNbNiNfQsMQvZQy@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAaTQdZQBfFQkMQnZ__T5forceTAhTQBdZQoFNaNbNiNeNgAaZQx@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAaTQdZQBfFQkMQnZ__T5forceTQBaTAhZQoFNaNbNiNeNgAhZQBx@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAaTaZQBeFNaNfQnaZQr@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAaTaZQBeFQjaZ13trustedMemchrFNaNbNiNeNkMKNgAaKxaZNgQi@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAhTQdZQBfFNaNbNiNfQsMQvZQy@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAxaTQxZQBgFNaNbNiNfQtMQBpZQBa@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAxaTQxZQBgFQlMQBhZ__T5forceTAhTAaZQnFNaNbNiNeNgAaZQw@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAxaTQxZQBgFQlMQBhZ__T5forceTQBcTAhZQoFNaNbNiNeNgAhZQBz@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAyAaTQyZQBhFNaNbNiNfQuMQBqZQBb@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAyhTAxhZQBhFNaNbNiNfQuMQtZQBa@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TAyhTQxZQBgFNaNfQpMQBlZQw@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTAaZQBfFNaNbNiNfQBlMQtZQBs@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTAaZQBfFQBdMQlZ__T5forceTAhTQBbZQoFNaNbNiNeNgAaZQx@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTAaZQBfFQBdMQlZ__T5forceTQBuTAhZQoFNaNbNiNeNgAhZQCr@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTQwZQBfFNaNbNiNfQBlMQBpZQBt@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTQwZQBfFQBdMQBhZ__T5forceTAhTAaZQnFNaNbNiNeNgAaZQw@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTQwZQBfFQBdMQBhZ__T5forceTQBvTAhZQoFNaNbNiNeNgAhZQCs@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTaZQBeFNaNfQBgaZQBl@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTaZQBeFQBcaZ13trustedMemchrFNaNbNiNeNkMKNgAyaKxaZNgQj@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTxaZQBfFNaNfQBhxaZQBn@Base 12
+ _D3std9algorithm9searching__T4findVAyaa6_61203d3d2062TQtTxaZQBfFQBdxaZ13trustedMemchrFNaNbNiNeNkMKNgAyaKxaZNgQj@Base 12
+ _D3std9algorithm9searching__T5countTAyaZQlFNaNbNiNfQpZm@Base 12
+ _D3std9algorithm9searching__T5countVAyaa6_61203d3d2062TQtTQwZQBgFNaNbNiNfQBlQBoZm@Base 12
+ _D3std9algorithm9searching__T7canFindVAyaa6_6120213d2030Z__TQBfTAkZQBmFNaNbNiNfQpZb@Base 12
+ _D3std9algorithm9searching__T7canFindVAyaa6_6120213d2030Z__TQBfTAxkZQBnFNaNbNiNfQqZb@Base 12
+ _D3std9algorithm9searching__T7canFindZ__TQmTAaTaZQuFNaNfQmaZb@Base 12
+ _D3std9algorithm9searching__T7canFindZ__TQmTAyAaTAyaZQyFNaNbNiNfQuMQsZb@Base 12
+ _D3std9algorithm9searching__T7canFindZ__TQmTAyhTAyaZQxFNaNfQpMQoZb@Base 12
+ _D3std9algorithm9searching__T8endsWithVAyaa6_61203d3d2062TQtTQwZQBjFNaNbNiNfQBlQBoZb@Base 12
+ _D3std9algorithm9searching__T8findSkipVAyaa6_61203d3d2062TQtTQwZQBjFNaNbNiNfKQBmQBpZb@Base 12
+ _D3std9algorithm9searching__T8skipOverZ__TQnTAxwTAywZQyFNaNbNiNfKQuQsZb@Base 12
+ _D3std9algorithm9searching__T8skipOverZ__TQnTSQBs3utf__T5byUTFTwVEQCm8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFpQDx__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQFkFNcQCeZ6ResultTAywZQHhFNaNbNiNfKQHeQuZb@Base 12
+ _D3std9algorithm9searching__T8skipOverZ__TQnTSQBs3utf__T5byUTFTwVEQCm8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCai1Z__TQDiTSQFpQDx__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImplZQFkFNcQCeZ6ResultTAywZQHhFNaNbNiNfKQHeQuZb@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFNaNbNiNfQBlQBoZSQDfQDeQCx__TQCqVQCja6_61203d3d2062TQDcTQDgZQDvFQDoQDrZ__T6ResultTQEgTQEkZQq@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq11__xopEqualsMxFKxSQEkQEjQEc__TQDvVQDoa6_61203d3d2062TQEhTQElZQFaFQEtQEwZ__TQDqTQFhTQFlZQEcZb@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq6__ctorMFNaNbNcNiNfQCyQDbQDeZSQEvQEuQEn__TQEgVQDza6_61203d3d2062TQEsTQEwZQFlFQFeQFhZ__TQEbTQFsTQFwZQEn@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq6__initZ@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq8opAssignMFNaNbNcNiNjNfSQEpQEoQEh__TQEaVQDta6_61203d3d2062TQEmTQEqZQFfFQEyQFbZ__TQDvTQFmTQFqZQEhZQCw@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq8opAssignMFNaNbNiNfSQEl8typecons__T5TupleTQDvTQDzTQEdZQtZv@Base 12
+ _D3std9algorithm9searching__T9findSplitVAyaa6_61203d3d2062TQtTQwZQBkFQBdQBgZ__T6ResultTQBvTQBzZQq9__xtoHashFNbNeKxSQEjQEiQEb__TQDuVQDna6_61203d3d2062TQEgTQEkZQEzFQEsQEvZ__TQDpTQFgTQFkZQEbZm@Base 12
+ _D3std9container10binaryheap11__moduleRefZ@Base 12
+ _D3std9container10binaryheap12__ModuleInfoZ@Base 12
+ _D3std9container11__moduleRefZ@Base 12
+ _D3std9container12__ModuleInfoZ@Base 12
+ _D3std9container4util11__moduleRefZ@Base 12
+ _D3std9container4util12__ModuleInfoZ@Base 12
+ _D3std9container5array11__moduleRefZ@Base 12
+ _D3std9container5array12__ModuleInfoZ@Base 12
+ _D3std9container5dlist11__moduleRefZ@Base 12
+ _D3std9container5dlist12__ModuleInfoZ@Base 12
+ _D3std9container5dlist6DRange4backMFNaNbNdNjNfZPSQBvQBuQBn8BaseNode@Base 12
+ _D3std9container5dlist6DRange4saveMFNaNbNdNjNfZSQBuQBtQBmQBj@Base 12
+ _D3std9container5dlist6DRange5emptyMxFNaNbNdNlNfZb@Base 12
+ _D3std9container5dlist6DRange5frontMFNaNbNdNjNfZPSQBwQBvQBo8BaseNode@Base 12
+ _D3std9container5dlist6DRange6__ctorMFNaNbNcNfPSQBuQBtQBm8BaseNodeQuZSQCqQCpQCiQCf@Base 12
+ _D3std9container5dlist6DRange6__ctorMFNaNbNcNfPSQBuQBtQBm8BaseNodeZSQCoQCnQCgQCd@Base 12
+ _D3std9container5dlist6DRange6__initZ@Base 12
+ _D3std9container5dlist6DRange7popBackMFNaNbNlNfZv@Base 12
+ _D3std9container5dlist6DRange8popFrontMFNaNbNlNfZv@Base 12
+ _D3std9container5dlist8BaseNode6__initZ@Base 12
+ _D3std9container5dlist8BaseNode7connectFNaNbNfPSQBuQBtQBmQBjQoZv@Base 12
+ _D3std9container5slist11__moduleRefZ@Base 12
+ _D3std9container5slist12__ModuleInfoZ@Base 12
+ _D3std9container6rbtree11__moduleRefZ@Base 12
+ _D3std9container6rbtree12__ModuleInfoZ@Base 12
+ _D3std9exception11__moduleRefZ@Base 12
+ _D3std9exception11errnoStringFNbNeiZAya@Base 12
+ _D3std9exception12__ModuleInfoZ@Base 12
+ _D3std9exception14ErrnoException5errnoMFNaNbNdNiNfZk@Base 12
+ _D3std9exception14ErrnoException6__ctorMFNfAyaQdmZCQBxQBwQBp@Base 12
+ _D3std9exception14ErrnoException6__ctorMFNfAyaiQemZCQByQBxQBq@Base 12
+ _D3std9exception14ErrnoException6__initZ@Base 12
+ _D3std9exception14ErrnoException6__vtblZ@Base 12
+ _D3std9exception14ErrnoException7__ClassZ@Base 12
+ _D3std9exception14RangePrimitive6__initZ@Base 12
+ _D3std9exception__T11doesPointToTAAyaTQfTvZQyFNaNbNiNeKxAAyaKxQgZb@Base 12
+ _D3std9exception__T11doesPointToTACQBh3zip13ArchiveMemberTQzTvZQBsFNaNbNiNeKxACQCzQBsQBrKxQnZb@Base 12
+ _D3std9exception__T11doesPointToTASQBh5regex8internal2ir10NamedGroupTQBkTvZQCeFNaNbNiNeKxASQDlQCeQCbQBvQBvKxQtZb@Base 12
+ _D3std9exception__T11doesPointToTASQBh8datetime8timezone13PosixTimeZone10LeapSecondTQBzTvZQCtFNaNbNiNeKxASQEaQCtQCnQChQBvKxQtZb@Base 12
+ _D3std9exception__T11doesPointToTASQBh8datetime8timezone13PosixTimeZone14TempTransitionTQCdTvZQCxFNaNbNiNeKxASQEeQCxQCrQClQBzKxQtZb@Base 12
+ _D3std9exception__T11doesPointToTAkTQdTvZQwFNaNbNiNeKxAkKxQeZb@Base 12
+ _D3std9exception__T11doesPointToTAxSQBi4file15DirIteratorImpl9DirHandleTSQCtQBlQBjTvZQCoFNaNbNiNeKxAQCnKxSQEaQCsQCqZb@Base 12
+ _D3std9exception__T11doesPointToTAxSQBi4file8DirEntryTSQCbQt15DirIteratorImplTvZQCjFNaNbNiNeKxAQCiKxSQDvQCnQBvZb@Base 12
+ _D3std9exception__T11doesPointToTAxkTSQBk3uni__T13InversionListTSQClQBb8GcPolicyZQBh__T9IntervalsTAkZQoTvZQDjFNaNbNiNeKxAkKxSQEtQDj__TQDiTQCwZQDq__TQCjTQCcZQCrZb@Base 12
+ _D3std9exception__T11doesPointToTAyaTSQBk3net4curl3FTP4ImplTvZQBrFNaNbNiNeKxAyaKxSQDcQBsQBrQBpQBoZb@Base 12
+ _D3std9exception__T11doesPointToTAyaTSQBk3net4curl4HTTP4ImplTvZQBsFNaNbNiNeKxAyaKxSQDdQBtQBsQBqQBoZb@Base 12
+ _D3std9exception__T11doesPointToTAyaTSQBk4file15DirIteratorImplTvZQBvFNaNbNiNeKxAyaKxSQDgQBwQBuZb@Base 12
+ _D3std9exception__T11doesPointToTAyaTSQBk5stdio17LockingTextReaderTvZQByFNaNbNiNeKxAyaKxSQDjQBzQBwZb@Base 12
+ _D3std9exception__T11doesPointToTAyaTSQBk5stdio4FileTvZQBkFNaNbNiNeKxAyaKxSQCvQBlQBiZb@Base 12
+ _D3std9exception__T11doesPointToTCQBg3zip13ArchiveMemberTQyTvZQBrFNaNbNiNeKxCQCxQBrQBqKxQmZb@Base 12
+ _D3std9exception__T11doesPointToTDFAhZmTSQBn3net4curl3FTP4ImplTvZQBuFNaNbNiNeKxDQBuKxSQDgQBtQBsQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFAhZmTSQBn3net4curl4HTTP4ImplTvZQBvFNaNbNiNeKxDQBvKxSQDhQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFAhZmTSQBn3net4curl4SMTP4ImplTvZQBvFNaNbNiNeKxDQBvKxSQDhQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFAvZmTSQBn3net4curl3FTP4ImplTvZQBuFNaNbNiNeKxDQBuKxSQDgQBtQBsQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFAvZmTSQBn3net4curl4HTTP4ImplTvZQBvFNaNbNiNeKxDQBvKxSQDhQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFAvZmTSQBn3net4curl4SMTP4ImplTvZQBvFNaNbNiNeKxDQBvKxSQDhQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFEQBi6socket8socket_tE3etc1c4curl12CurlSockTypeZiTSQDf3netQBe3FTP4ImplTvZQDkFNaNbNiNeKxDQDkKxSQEwQBrQCuQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFEQBi6socket8socket_tE3etc1c4curl12CurlSockTypeZiTSQDf3netQBe4HTTP4ImplTvZQDlFNaNbNiNeKxDQDlKxSQExQBsQCvQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFEQBi6socket8socket_tE3etc1c4curl12CurlSockTypeZiTSQDf3netQBe4SMTP4ImplTvZQDlFNaNbNiNeKxDQDlKxSQExQBsQCvQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFIAaZvTSQBo3net4curl3FTP4ImplTvZQBvFNaNbNiNeKxDQBvKxSQDhQBtQBsQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFIAaZvTSQBo3net4curl4HTTP4ImplTvZQBwFNaNbNiNeKxDQBwKxSQDiQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFIAaZvTSQBo3net4curl4SMTP4ImplTvZQBwFNaNbNiNeKxDQBwKxSQDiQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFSQBi3net4curl4HTTP10StatusLineZvTSQCpQBhQBgQBe4ImplTvZQCsFNaNbNiNeKxDQCsKxSQEeQCwQCvQCtQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFlE3etc1c4curl11CurlSeekPosZEQBaQzQz8CurlSeekTSQDb3netQBt3FTP4ImplTvZQDgFNaNbNiNeKxDQDgKxSQEsQBrQDjQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFlE3etc1c4curl11CurlSeekPosZEQBaQzQz8CurlSeekTSQDb3netQBt4HTTP4ImplTvZQDhFNaNbNiNeKxDQDhKxSQEtQBsQDkQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFlE3etc1c4curl11CurlSeekPosZEQBaQzQz8CurlSeekTSQDb3netQBt4SMTP4ImplTvZQDhFNaNbNiNeKxDQDhKxSQEtQBsQDkQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFmmmmZiTSQBp3net4curl3FTP4ImplTvZQBwFNaNbNiNeKxDQBwKxSQDiQBtQBsQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFmmmmZiTSQBp3net4curl4HTTP4ImplTvZQBxFNaNbNiNeKxDQBxKxSQDjQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTDFmmmmZiTSQBp3net4curl4SMTP4ImplTvZQBxFNaNbNiNeKxDQBxKxSQDjQBuQBtQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTEQBg3net4curl4HTTP6MethodTSQCgQBaQzQw4ImplTvZQChFNaNbNiNeKxEQDnQChQCgQCeQCcKxSQEfQCzQCyQCwQCbZb@Base 12
+ _D3std9exception__T11doesPointToTEQBg4file8SpanModeTSQBzQt15DirIteratorImplTvZQChFNaNbNiNeKxEQDnQChQCfKxSQDzQCtQCbZb@Base 12
+ _D3std9exception__T11doesPointToTG3lTSQBk4file15DirIteratorImplTvZQBvFNaNbNiNeKxG3lKxSQDgQBwQBuZb@Base 12
+ _D3std9exception__T11doesPointToTHAyaxAyaTSQBp3net4curl4HTTP4ImplTvZQBxFNaNbNiNeKxHQBxQBwKxSQDmQBxQBwQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTPSQBh8typecons__T10RefCountedTSQCk3net4curl3FTP4ImplVEQDhQCa24RefCountedAutoInitializei1ZQCu15RefCountedStoreQCkTQEjTvZQFdFNaNbNiNeKxPSQGkQFd__TQExTQEoVQDvi1ZQFlQCrQEnKxQBkZb@Base 12
+ _D3std9exception__T11doesPointToTPSQBh8typecons__T10RefCountedTSQCk3net4curl4HTTP4ImplVEQDiQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCkTQEkTvZQFeFNaNbNiNeKxPSQGlQFe__TQEyTQEpVQDvi1ZQFmQCrQEnKxQBkZb@Base 12
+ _D3std9exception__T11doesPointToTPSQBh8typecons__T10RefCountedTSQCk3net4curl4SMTP4ImplVEQDiQCb24RefCountedAutoInitializei1ZQCv15RefCountedStoreQCkTQEkTvZQFeFNaNbNiNeKxPSQGlQFe__TQEyTQEpVQDvi1ZQFmQCrQEnKxQBkZb@Base 12
+ _D3std9exception__T11doesPointToTPSQBh8typecons__T10RefCountedTSQCk4file15DirIteratorImplVEQDlQCe24RefCountedAutoInitializei0ZQCy15RefCountedStore4ImplTQEpTvZQFjFNaNbNiNeKxPSQGqQFj__TQFdTQEuVQDxi0ZQFrQCtQCfKxQBkZb@Base 12
+ _D3std9exception__T11doesPointToTPxS3etc1c4curl10curl_slistTSQCh3netQBa3FTP4ImplTvZQCmFNaNbNiNeKxPQClKxSQDyQBrQCqQBqQBpZb@Base 12
+ _D3std9exception__T11doesPointToTPxS3etc1c4curl10curl_slistTSQCh3netQBa4HTTP4ImplTvZQCnFNaNbNiNeKxPQCmKxSQDzQBsQCrQBrQBpZb@Base 12
+ _D3std9exception__T11doesPointToTPxSQBi5stdio4File4ImplTSQCdQv17LockingTextReaderTvZQCnFNaNbNiNeKxPQCmKxSQDzQCrQBxZb@Base 12
+ _D3std9exception__T11doesPointToTPxSQBi5stdio4File4ImplTSQCdQvQrTvZQBwFNaNbNiNeKxPQBvKxSQDiQCaQBxZb@Base 12
+ _D3std9exception__T11doesPointToTPxSQBi8datetime8timezone13PosixTimeZone14TransitionTypeTSQDkQCcQBwQBq14TempTransitionTvZQDyFNaNbNiNeKxPQDxKxSQFkQEcQDwQDqQCaZb@Base 12
+ _D3std9exception__T11doesPointToTPxvTSQBk3net4curl3FTP4ImplTvZQBrFNaNbNiNeKxPvKxSQDbQBrQBqQBoQBnZb@Base 12
+ _D3std9exception__T11doesPointToTPxvTSQBk3net4curl4HTTP4ImplTvZQBsFNaNbNiNeKxPvKxSQDcQBsQBrQBpQBnZb@Base 12
+ _D3std9exception__T11doesPointToTPxvTSQBk3net4curl4SMTP4ImplTvZQBsFNaNbNiNeKxPvKxSQDcQBsQBrQBpQBnZb@Base 12
+ _D3std9exception__T11doesPointToTPySQBi8datetime8timezone13PosixTimeZone6TTInfoTSQDbQBtQBnQBh14TempTransitionTvZQDpFNaNbNiNeKxPyQDpKxSQFcQDuQDoQDiQCbZb@Base 12
+ _D3std9exception__T11doesPointToTS4core3sys5posixQk4stat6stat_tTSQCl4file15DirIteratorImplTvZQCwFNaNbNiNeKxSQCwQCuQCtQDaQCrQCpKxSQExQCmQCkZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl3FTP4ImplTQxTvZQBqFNaNbNiNeKxSQCwQBqQBpQBnQBmKxQsZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4CurlTSQBzQtQr3FTP4ImplTvZQCbFNaNbNiNeKxSQDhQCbQCaQByKxSQDwQCqQCpQBzQByZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4CurlTSQBzQtQr4HTTP4ImplTvZQCcFNaNbNiNeKxSQDiQCcQCbQBzKxSQDxQCrQCqQCaQByZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4CurlTSQBzQtQr4SMTP4ImplTvZQCcFNaNbNiNeKxSQDiQCcQCbQBzKxSQDxQCrQCqQCaQByZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4HTTP10StatusLineTSQClQBfQBeQBc4ImplTvZQCoFNaNbNiNeKxSQDuQCoQCnQClQCjKxSQEmQDgQDfQDdQCbZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4HTTP4ImplTQyTvZQBrFNaNbNiNeKxSQCxQBrQBqQBoQBmKxQsZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3net4curl4SMTP4ImplTQyTvZQBrFNaNbNiNeKxSQCxQBrQBqQBoQBmKxQsZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg3uni__T13InversionListTSQChQBb8GcPolicyZQBh__T9IntervalsTAkZQoTQCpTvZQDjFNaNbNiNeKxSQEpQDj__TQDiTQCwZQDq__TQCjTQCcZQCrKxQBlZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg4file15DirIteratorImplTQBbTvZQBvFNaNbNiNeKxSQDbQBvQBtKxQmZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg4file8DirEntryTSQBzQt15DirIteratorImplTvZQChFNaNbNiNeKxSQDnQChQCfKxSQDzQCtQCbZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg5stdio17LockingTextReaderTQBeTvZQByFNaNbNiNeKxSQDeQByQBvKxQmZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg5stdio4FileTQqTvZQBjFNaNbNiNeKxSQCpQBjQBgKxQmZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg5stdio4FileTSQBwQq17LockingTextReaderTvZQCgFNaNbNiNeKxSQDmQCgQCdKxSQDyQCsQCdZb@Base 12
+ _D3std9exception__T11doesPointToTSQBg8datetime8timezone13PosixTimeZone14TempTransitionTQCcTvZQCwFNaNbNiNeKxSQEcQCwQCqQCkQByKxQsZb@Base 12
+ _D3std9exception__T11doesPointToTaTSQBi5stdio17LockingTextReaderTvZQBwFNaNbNiNeKxaKxSQDfQBxQBuZb@Base 12
+ _D3std9exception__T11doesPointToTbTSQBi3net4curl3FTP4ImplTvZQBpFNaNbNiNeKxbKxSQCyQBqQBpQBnQBmZb@Base 12
+ _D3std9exception__T11doesPointToTbTSQBi3net4curl4HTTP4ImplTvZQBqFNaNbNiNeKxbKxSQCzQBrQBqQBoQBmZb@Base 12
+ _D3std9exception__T11doesPointToTbTSQBi3net4curl4SMTP4ImplTvZQBqFNaNbNiNeKxbKxSQCzQBrQBqQBoQBmZb@Base 12
+ _D3std9exception__T11doesPointToTbTSQBi4file15DirIteratorImplTvZQBtFNaNbNiNeKxbKxSQDcQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTbTSQBi5stdio17LockingTextReaderTvZQBwFNaNbNiNeKxbKxSQDfQBxQBuZb@Base 12
+ _D3std9exception__T11doesPointToThTSQBi4file15DirIteratorImplTvZQBtFNaNbNiNeKxhKxSQDcQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTkTSQBi4file15DirIteratorImplTvZQBtFNaNbNiNeKxkKxSQDcQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTlTSQBi4file15DirIteratorImplTvZQBtFNaNbNiNeKxlKxSQDcQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTlTSQBi8datetime8timezone13PosixTimeZone14TempTransitionTvZQCuFNaNbNiNeKxlKxSQEdQCvQCpQCjQBxZb@Base 12
+ _D3std9exception__T11doesPointToTmTSQBi3uni__T13InversionListTSQCjQBb8GcPolicyZQBh__T9IntervalsTAkZQoTvZQDhFNaNbNiNeKxmKxSQEqQDi__TQDhTQCvZQDp__TQCiTQCbZQCqZb@Base 12
+ _D3std9exception__T11doesPointToTmTSQBi4file15DirIteratorImplTvZQBtFNaNbNiNeKxmKxSQDcQBuQBsZb@Base 12
+ _D3std9exception__T11doesPointToTtTSQBi3net4curl4HTTP4ImplTvZQBqFNaNbNiNeKxtKxSQCzQBrQBqQBoQBmZb@Base 12
+ _D3std9exception__T12assumeUniqueTaZQrFNaNbNiAaZAya@Base 12
+ _D3std9exception__T12assumeUniqueTaZQrFNaNbNiKAaZAya@Base 12
+ _D3std9exception__T12assumeUniqueTkZQrFNaNbNiKAkZAyk@Base 12
+ _D3std9exception__T16collectExceptionHTC9ExceptionTmZQBiFNaNbNfLmZQBb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTS4core3sys5posixQk4stat6stat_tZQCaFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3net4curl3FTP4ImplZQBsFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3net4curl4CurlZQBoFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3net4curl4HTTP10StatusLineZQCaFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3net4curl4HTTP4ImplZQBtFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3net4curl4SMTP4ImplZQBtFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn3uni__T13InversionListTSQCoQBb8GcPolicyZQBh__T9IntervalsTAkZQoZQDkFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn4file15DirIteratorImplZQBwFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn4file8DirEntryZQBoFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn5stdio17LockingTextReaderZQBzFNaNbNiNfmZb@Base 12
+ _D3std9exception__T18isUnionAliasedImplTSQBn5stdio4FileZQBlFNaNbNiNfmZb@Base 12
+ _D3std9exception__T7bailOutHTC4core4time13TimeExceptionZQBlFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTC4coreQBd16OutOfMemoryErrorZQBmFNaNbNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTC9ExceptionZQwFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc11concurrency19TidMissingExceptionZQBxFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc3net4curl13CurlExceptionZQBnFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc3net4curl20CurlTimeoutExceptionZQBuFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc3zip12ZipExceptionZQBhFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc4json13JSONExceptionZQBjFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc6format15FormatExceptionZQBnFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBc7process16ProcessExceptionZQBpFNaNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7bailOutHTCQBcQBb14ErrnoExceptionZQBiFNfAyamMAxaZNn@Base 12
+ _D3std9exception__T7enforceHTC4core4time13TimeExceptionZ__TQBoTbZQBuFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTC4coreQBd16OutOfMemoryErrorZ__TQBpTbZQBvFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc11concurrency19TidMissingExceptionZ__TQCaTbZQCgFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc3net4curl13CurlExceptionZ__TQBqTPvZQBxFNaNfQlLAxaAyamZQw@Base 12
+ _D3std9exception__T7enforceHTCQBc3net4curl13CurlExceptionZ__TQBqTbZQBwFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc3net4curl20CurlTimeoutExceptionZ__TQBxTbZQCdFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc3zip12ZipExceptionZ__TQBkTbZQBqFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc4json13JSONExceptionZ__TQBmTPNgSQClQBj9JSONValueZQClFNaNfQBdLAxaAyamZQBp@Base 12
+ _D3std9exception__T7enforceHTCQBc4json13JSONExceptionZ__TQBmTbZQBsFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc6format15FormatExceptionZ__TQBqTbZQBwFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBc6format15FormatExceptionZ__TQBqTmZQBwFNaNfmLAxaAyamZm@Base 12
+ _D3std9exception__T7enforceHTCQBc7process16ProcessExceptionZ__TQBsTbZQByFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBcQBb14ErrnoExceptionZ__TQBlTPOS4core4stdc5stdio8_IO_FILEZQCsFNfQBjLAxaAyamZQBv@Base 12
+ _D3std9exception__T7enforceHTCQBcQBb14ErrnoExceptionZ__TQBlTbZQBrFNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceHTCQBcQBb14ErrnoExceptionZ__TQBlTiZQBrFNfiLAxaAyamZi@Base 12
+ _D3std9exception__T7enforceTPS4core3sys5posix5netdb7hostentZQBpFNaNfQBoLC6object9ThrowableZQCl@Base 12
+ _D3std9exception__T7enforceTbZQlFNaNfbLC6object9ThrowableZb@Base 12
+ _D3std9exception__T7enforceZ__TQmTAyaZQtFNaNfQlLAxaQrmZQv@Base 12
+ _D3std9exception__T7enforceZ__TQmTPOS4core4stdc5stdio8_IO_FILEZQBsFNaNfQBlLAxaAyamZQBx@Base 12
+ _D3std9exception__T7enforceZ__TQmTPSQBi11concurrency__T4ListTSQCiQBa7MessageZQw4NodeZQCoFNaNfQChLAxaAyamZQCt@Base 12
+ _D3std9exception__T7enforceZ__TQmTPvZQsFNaNfQkLAxaAyamZQv@Base 12
+ _D3std9exception__T7enforceZ__TQmTbZQrFNaNfbLAxaAyamZb@Base 12
+ _D3std9exception__T7enforceZ__TQmTiZQrFNaNfiLAxaAyamZi@Base 12
+ _D3std9exception__T7enforceZ__TQmTkZQrFNaNfkLAxaAyamZk@Base 12
+ _D3std9exception__T7enforceZ__TQmTmZQrFNaNfmLAxaAyamZm@Base 12
+ _D3std9outbuffer11__moduleRefZ@Base 12
+ _D3std9outbuffer12__ModuleInfoZ@Base 12
+ _D3std9outbuffer9OutBuffer11__invariantMxFZv@Base 12
+ _D3std9outbuffer9OutBuffer12__invariant0MxFZv@Base 12
+ _D3std9outbuffer9OutBuffer5clearMFNaNbNfZv@Base 12
+ _D3std9outbuffer9OutBuffer5fill0MFNaNbNfmZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNeMAxaZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNeMAxuZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNeMAxwZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNedZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNeeZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNefZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNekZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNemZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNetZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNeuZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfMAxhZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfMxCQBpQBoQBhZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfaZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfgZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfhZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfiZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNflZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfsZv@Base 12
+ _D3std9outbuffer9OutBuffer5writeMFNaNbNfwZv@Base 12
+ _D3std9outbuffer9OutBuffer6__initZ@Base 12
+ _D3std9outbuffer9OutBuffer6__vtblZ@Base 12
+ _D3std9outbuffer9OutBuffer6align2MFNaNbNfZv@Base 12
+ _D3std9outbuffer9OutBuffer6align4MFNaNbNfZv@Base 12
+ _D3std9outbuffer9OutBuffer6printfMFNeMAyaYv@Base 12
+ _D3std9outbuffer9OutBuffer6spreadMFNaNbNfmmZv@Base 12
+ _D3std9outbuffer9OutBuffer7__ClassZ@Base 12
+ _D3std9outbuffer9OutBuffer7reserveMFNaNbNemZv@Base 12
+ _D3std9outbuffer9OutBuffer7toBytesMNgFNaNbNlNfZANgh@Base 12
+ _D3std9outbuffer9OutBuffer7vprintfMFNbNeMAyaG1S3gcc8builtins13__va_list_tagZv@Base 12
+ _D3std9outbuffer9OutBuffer8toStringMxFNaNbNfZAya@Base 12
+ _D3std9outbuffer9OutBuffer9alignSizeMFNaNbNfmZv@Base 12
+ _D3std9typetuple11__moduleRefZ@Base 12
+ _D3std9typetuple12__ModuleInfoZ@Base 12
+ _D40TypeInfo_AxAS3std3uni17CodepointInterval6__initZ@Base 12
+ _D40TypeInfo_C3std11concurrency11IsGenerator6__initZ@Base 12
+ _D40TypeInfo_E3std3uni20UnicodeDecomposition6__initZ@Base 12
+ _D40TypeInfo_E3std5range18SortedRangeOptions6__initZ@Base 12
+ _D40TypeInfo_E3std6socket17SocketOptionLevel6__initZ@Base 12
+ _D40TypeInfo_E3std6traits17FunctionAttribute6__initZ@Base 12
+ _D40TypeInfo_E3std7numeric16CustomFloatFlags6__initZ@Base 12
+ _D40TypeInfo_E3std8encoding15Windows1250Char6__initZ@Base 12
+ _D40TypeInfo_E3std8encoding15Windows1251Char6__initZ@Base 12
+ _D40TypeInfo_E3std8encoding15Windows1252Char6__initZ@Base 12
+ _D40TypeInfo_E3std9exception14RangePrimitive6__initZ@Base 12
+ _D40TypeInfo_E4core6stdcpp4new_11align_val_t6__initZ@Base 12
+ _D40TypeInfo_E4core6thread5fiber5Fiber5State6__initZ@Base 12
+ _D40TypeInfo_S3std3net4curl4HTTP10StatusLine6__initZ@Base 12
+ _D40TypeInfo_S3std3uni__T9BitPackedTbVmi1ZQr6__initZ@Base 12
+ _D40TypeInfo_S3std3uni__T9BitPackedTkVmi7ZQr6__initZ@Base 12
+ _D40TypeInfo_S3std3uni__T9BitPackedTkVmi8ZQr6__initZ@Base 12
+ _D40TypeInfo_S3std5array__T8AppenderTAAyaZQp6__initZ@Base 12
+ _D40TypeInfo_S3std5range__T10OnlyResultTaZQp6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5linux4tipc9tipc_name6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix5netdb8addrinfo6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix5netdb8protoent6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix6mqueue7mq_attr6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posix6signal7stack_t6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3ipc8ipc_perm6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3msg8msqid_ds6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk3shm8shmid_ds6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk4time7timeval6__initZ@Base 12
+ _D40TypeInfo_S4core3sys5posixQk5ioctl6termio6__initZ@Base 12
+ _D40TypeInfo_xAAS3std3uni17CodepointInterval6__initZ@Base 12
+ _D40TypeInfo_xS3std5regex__T8CapturesTAxaZQo6__initZ@Base 12
+ _D40TypeInfo_xS3std8datetime7systime7SysTime6__initZ@Base 12
+ _D40TypeInfo_xS4core3sys5posixQk4stat6stat_t6__initZ@Base 12
+ _D41TypeInfo_AE3std8encoding15Windows1250Char6__initZ@Base 12
+ _D41TypeInfo_AE3std8encoding15Windows1251Char6__initZ@Base 12
+ _D41TypeInfo_AE3std8encoding15Windows1252Char6__initZ@Base 12
+ _D41TypeInfo_E3etc1c4curl18CurlFInfoFlagKnown6__initZ@Base 12
+ _D41TypeInfo_E4core3sys5posixQk4wait8idtype_t6__initZ@Base 12
+ _D41TypeInfo_E4core3sys5posixQk7statvfs5FFlag6__initZ@Base 12
+ _D41TypeInfo_E4core4stdc6config12__c_longlong6__initZ@Base 12
+ _D41TypeInfo_FZC3std8encoding14EncodingScheme6__initZ@Base 12
+ _D41TypeInfo_HAyaDFC3std3xml13ElementParserZv6__initZ@Base 12
+ _D41TypeInfo_S3std10checkedint13ProperCompare6__initZ@Base 12
+ _D41TypeInfo_S3std11parallelism12AbstractTask6__initZ@Base 12
+ _D41TypeInfo_S3std3uni21DecompressedIntervals6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi11ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi12ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi13ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi14ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi15ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std3uni__T9BitPackedTkVmi16ZQs6__initZ@Base 12
+ _D41TypeInfo_S3std5regex8internal2ir8BitTable6__initZ@Base 12
+ _D41TypeInfo_S3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D41TypeInfo_S3std5regex__T10RegexMatchTAaZQq6__initZ@Base 12
+ _D41TypeInfo_S3std6random18RandomCoverChoices6__initZ@Base 12
+ _D41TypeInfo_S3std8typecons__T5TupleTiTAyaZQn6__initZ@Base 12
+ _D41TypeInfo_S3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Ehdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Move6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Nhdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Phdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Rela6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf32_Shdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Ehdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Move6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Nhdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Phdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Rela6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux3elf10Elf64_Shdr6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5linux5sched9cpu_set_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigevent6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigset_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8sigstack6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix6signal8timespec6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posix9semaphore5sem_t6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk5ioctl7winsize6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6select6fd_set6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6socket6linger6__initZ@Base 12
+ _D41TypeInfo_S4core3sys5posixQk6socket6msghdr6__initZ@Base 12
+ _D41TypeInfo_xS3std3net4curl4HTTP10StatusLine6__initZ@Base 12
+ _D41TypeInfo_xS3std5range__T10OnlyResultTaZQp6__initZ@Base 12
+ _D42TypeInfo_AC3std3xml21ProcessingInstruction6__initZ@Base 12
+ _D42TypeInfo_AS3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D42TypeInfo_AS3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D42TypeInfo_E3std5regex8internal2ir9RegexInfo6__initZ@Base 12
+ _D42TypeInfo_E3std8datetime8interval9Direction6__initZ@Base 12
+ _D42TypeInfo_E4core4stdc6config13__c_ulonglong6__initZ@Base 12
+ _D42TypeInfo_E4core6thread5fiber5Fiber7Rethrow6__initZ@Base 12
+ _D42TypeInfo_HC4core6thread8osthread6ThreadQBd6__initZ@Base 12
+ _D42TypeInfo_PFZC3std8encoding14EncodingScheme6__initZ@Base 12
+ _D42TypeInfo_S3std3uni__T9sliceBitsVmi0Vmi5ZQt6__initZ@Base 12
+ _D42TypeInfo_S3std3uni__T9sliceBitsVmi0Vmi6ZQt6__initZ@Base 12
+ _D42TypeInfo_S3std3uni__T9sliceBitsVmi0Vmi7ZQt6__initZ@Base 12
+ _D42TypeInfo_S3std3uni__T9sliceBitsVmi0Vmi8ZQt6__initZ@Base 12
+ _D42TypeInfo_S3std3uni__T9sliceBitsVmi0Vmi9ZQt6__initZ@Base 12
+ _D42TypeInfo_S3std5regex__T10RegexMatchTAxaZQr6__initZ@Base 12
+ _D42TypeInfo_S3std7variant__T8VariantNVmi32ZQp6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux2fs12fstrim_range6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux3elf11Elf32_gptab6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux3elf11Elf_Options6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5linux4tipc10tipc_event6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posix4time10itimerspec6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posix6signal9siginfo_t6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk4time9itimerval6__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk5ioctl8termios26__initZ@Base 12
+ _D42TypeInfo_S4core3sys5posixQk6socket7cmsghdr6__initZ@Base 12
+ _D42TypeInfo_S4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D42TypeInfo_xS3std11parallelism12AbstractTask6__initZ@Base 12
+ _D42TypeInfo_xS3std3uni21DecompressedIntervals6__initZ@Base 12
+ _D42TypeInfo_xS3std5regex8internal2ir8BitTable6__initZ@Base 12
+ _D42TypeInfo_xS3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D42TypeInfo_xS3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D43TypeInfo_AxS3std5regex8internal2ir8BitTable6__initZ@Base 12
+ _D43TypeInfo_AxS3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D43TypeInfo_AxS3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D43TypeInfo_E3std3net7isemail15EmailStatusCode6__initZ@Base 12
+ _D43TypeInfo_E3std9algorithm10comparison6EditOp6__initZ@Base 12
+ _D43TypeInfo_E4core6thread10threadbase8IsMarked6__initZ@Base 12
+ _D43TypeInfo_E4core6thread10threadbase8ScanType6__initZ@Base 12
+ _D43TypeInfo_E4core8internal2gc2os11ChildStatus6__initZ@Base 12
+ _D43TypeInfo_FS3std3net4curl4HTTP10StatusLineZv6__initZ@Base 12
+ _D43TypeInfo_OS4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D43TypeInfo_PxS3std11parallelism12AbstractTask6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D43TypeInfo_S2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi5Vmi13ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi6Vmi10ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi6Vmi13ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi7Vmi13ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi8Vmi13ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi8Vmi21ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi9Vmi13ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std3uni__T9sliceBitsVmi9Vmi21ZQu6__initZ@Base 12
+ _D43TypeInfo_S3std5array__T8AppenderTAaZQn4Data6__initZ@Base 12
+ _D43TypeInfo_S3std8datetime9stopwatch9StopWatch6__initZ@Base 12
+ _D43TypeInfo_S3std8typecons__T5TupleTeTeTeTeZQp6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux2fs13inodes_stat_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf32_Verdef6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf32_auxv_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf64_Verdef6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux3elf12Elf64_auxv_t6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux4tipc11tipc_portid6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux4tipc11tipc_subscr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux5dlfcn10Dl_serinfo6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5linux5dlfcn10Dl_serpath6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posix4arpa4inet7in_addr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk2un11sockaddr_un6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk6socket8sockaddr6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk8resource6rlimit6__initZ@Base 12
+ _D43TypeInfo_S4core3sys5posixQk8resource6rusage6__initZ@Base 12
+ _D43TypeInfo_S4core6thread5types13ll_ThreadData6__initZ@Base 12
+ _D43TypeInfo_xAS3std5regex8internal2ir8BitTable6__initZ@Base 12
+ _D43TypeInfo_xAS3std5regex8internal2ir8Bytecode6__initZ@Base 12
+ _D43TypeInfo_xAS3std8typecons__T5TupleTkTkTkZQn6__initZ@Base 12
+ _D43TypeInfo_xPS3std11parallelism12AbstractTask6__initZ@Base 12
+ _D43TypeInfo_xS3std7variant__T8VariantNVmi32ZQp6__initZ@Base 12
+ _D44TypeInfo_DFS3std3net4curl4HTTP10StatusLineZv6__initZ@Base 12
+ _D44TypeInfo_E2rt4util7utility16__c_complex_real6__initZ@Base 12
+ _D44TypeInfo_E3std6traits21ParameterStorageClass6__initZ@Base 12
+ _D44TypeInfo_E4core6thread7context8Callable4Call6__initZ@Base 12
+ _D44TypeInfo_OS2rt9critical_18D_CRITICAL_SECTION6__initZ@Base 12
+ _D44TypeInfo_S3gcc8sections3elf15CompilerDSOData6__initZ@Base 12
+ _D44TypeInfo_S3gcc9backtrace18SymbolCallbackInfo6__initZ@Base 12
+ _D44TypeInfo_S3std3uni__T9sliceBitsVmi10Vmi14ZQv6__initZ@Base 12
+ _D44TypeInfo_S3std3uni__T9sliceBitsVmi13Vmi21ZQv6__initZ@Base 12
+ _D44TypeInfo_S3std3uni__T9sliceBitsVmi14Vmi21ZQv6__initZ@Base 12
+ _D44TypeInfo_S3std5array__T8AppenderTAxaZQo4Data6__initZ@Base 12
+ _D44TypeInfo_S3std5array__T8AppenderTAyaZQo4Data6__initZ@Base 12
+ _D44TypeInfo_S3std5array__T8AppenderTAyuZQo4Data6__initZ@Base 12
+ _D44TypeInfo_S3std5array__T8AppenderTAywZQo4Data6__initZ@Base 12
+ _D44TypeInfo_S3std5array__T8AppenderTyAaZQo4Data6__initZ@Base 12
+ _D44TypeInfo_S3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D44TypeInfo_S3std5regex8internal6parser7CodeGen6__initZ@Base 12
+ _D44TypeInfo_S3std5stdio4File17LockingTextWriter6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_RegInfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Syminfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Verdaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Vernaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf32_Verneed6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Syminfo6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Verdaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Vernaux6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux3elf13Elf64_Verneed6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux4link12dl_phdr_info6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linux5epoll11epoll_event6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5linuxQk7sysinfo8sysinfo_6__initZ@Base 12
+ _D44TypeInfo_S4core3sys5posix5sched11sched_param6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTdZQm6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTeZQm6__initZ@Base 12
+ _D44TypeInfo_S4core4stdc6config__T8_ComplexTfZQm6__initZ@Base 12
+ _D44TypeInfo_S4core6thread7context12StackContext6__initZ@Base 12
+ _D44TypeInfo_xE3std3net7isemail15EmailStatusCode6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTdZQm6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTeZQm6__initZ@Base 12
+ _D44TypeInfo_xS2rt4util7utility__T8_ComplexTfZQm6__initZ@Base 12
+ _D45TypeInfo_AS3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D45TypeInfo_E2rt4util7utility17__c_complex_float6__initZ@Base 12
+ _D45TypeInfo_E3std5regex8internal2ir11RegexOption6__initZ@Base 12
+ _D45TypeInfo_E4core4stdc6config16__c_complex_real6__initZ@Base 12
+ _D45TypeInfo_E4core8internal7convert11FloatFormat6__initZ@Base 12
+ _D45TypeInfo_E6object14TypeInfo_Class10ClassFlags6__initZ@Base 12
+ _D45TypeInfo_S3gcc12libbacktrace15backtrace_state6__initZ@Base 12
+ _D45TypeInfo_S3gcc9backtrace19SymbolCallbackInfo26__initZ@Base 12
+ _D45TypeInfo_S3std3uni__T13PackedPtrImplThVmi8ZQw6__initZ@Base 12
+ _D45TypeInfo_S3std5array__T8AppenderTAAyaZQp4Data6__initZ@Base 12
+ _D45TypeInfo_S3std5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D45TypeInfo_S3std8typecons__T5TupleTAyaTQeTQhZQr6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf14Elf_Options_Hw6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf9Elf32_Dyn5_d_un6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux3elf9Elf64_Dyn5_d_un6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux4tipc13tipc_name_seq6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5linux5epoll12epoll_data_t6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5posix6signal11sigaction_t6__initZ@Base 12
+ _D45TypeInfo_S4core3sys5posixQk7statvfs9statvfs_t6__initZ@Base 12
+ _D45TypeInfo_S4core8internal12parseoptions6MemVal6__initZ@Base 12
+ _D45TypeInfo_S4core8internal9container5treap4Rand6__initZ@Base 12
+ _D45TypeInfo_xDFS3std3net4curl4HTTP10StatusLineZv6__initZ@Base 12
+ _D45TypeInfo_xS3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D45TypeInfo_xS3std5regex8internal6parser7CodeGen6__initZ@Base 12
+ _D46TypeInfo_AxS3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D46TypeInfo_E2rt4util7utility18__c_complex_double6__initZ@Base 12
+ _D46TypeInfo_E3std11parallelism8TaskPool9PoolState6__initZ@Base 12
+ _D46TypeInfo_E4core4stdc6config17__c_complex_float6__initZ@Base 12
+ _D46TypeInfo_HAyaPFZC3std8encoding14EncodingScheme6__initZ@Base 12
+ _D46TypeInfo_S3std3uni7unicode18hangulSyllableType6__initZ@Base 12
+ _D46TypeInfo_S3std3uni__T13PackedPtrImplTtVmi16ZQx6__initZ@Base 12
+ _D46TypeInfo_S3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D46TypeInfo_S3std5range__T4iotaTmTmZQkFmmZ6Result6__initZ@Base 12
+ _D46TypeInfo_S3std5regex8internal2ir__T5GroupTmZQj6__initZ@Base 12
+ _D46TypeInfo_S3std5regex8internal2ir__T5InputTaZQj6__initZ@Base 12
+ _D46TypeInfo_S3std5regex8internal2ir__T5RegexTaZQj6__initZ@Base 12
+ _D46TypeInfo_S3std6format4spec__T10FormatSpecTaZQp6__initZ@Base 12
+ _D46TypeInfo_S3std6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTaZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTbZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperThZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTiZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTkZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTlZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTmZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8bitmanip__T13EndianSwapperTtZQs6__initZ@Base 12
+ _D46TypeInfo_S3std8internal14unicode_tables6blocks6__initZ@Base 12
+ _D46TypeInfo_S3std8internal14unicode_tables6hangul6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5linux2fs16file_clone_range6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix7netinet3in_8in6_addr6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix8ucontext10mcontext_t6__initZ@Base 12
+ _D46TypeInfo_S4core3sys5posix8ucontext10ucontext_t6__initZ@Base 12
+ _D46TypeInfo_S4core4stdc6wchar_9mbstate_t8___value6__initZ@Base 12
+ _D46TypeInfo_S4core4sync5mutex5Mutex12MonitorProxy6__initZ@Base 12
+ _D46TypeInfo_xAS3std5regex8internal2ir10NamedGroup6__initZ@Base 12
+ _D46TypeInfo_xS3std5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D46TypeInfo_xS3std8typecons__T5TupleTAyaTQeTQhZQr6__initZ@Base 12
+ _D47TypeInfo_AC3std11parallelism17ParallelismThread6__initZ@Base 12
+ _D47TypeInfo_AC4core6thread10threadbase10ThreadBase6__initZ@Base 12
+ _D47TypeInfo_AS3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D47TypeInfo_AxS3std5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D47TypeInfo_E3std7variant__T8VariantNVmi32ZQp4OpID6__initZ@Base 12
+ _D47TypeInfo_E3std8internal4test10dummyrange6Length6__initZ@Base 12
+ _D47TypeInfo_E3std9algorithm8mutation12SwapStrategy6__initZ@Base 12
+ _D47TypeInfo_E4core4stdc6config18__c_complex_double6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp6string16DefaultConstruct6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp6vector16DefaultConstruct6__initZ@Base 12
+ _D47TypeInfo_E4core6stdcpp8xutility14CppStdRevision6__initZ@Base 12
+ _D47TypeInfo_E6object15TypeInfo_Struct11StructFlags6__initZ@Base 12
+ _D47TypeInfo_S3std6digest3sha__T3SHAVki512Vki160ZQr6__initZ@Base 12
+ _D47TypeInfo_S3std6digest3sha__T3SHAVki512Vki224ZQr6__initZ@Base 12
+ _D47TypeInfo_S3std6digest3sha__T3SHAVki512Vki256ZQr6__initZ@Base 12
+ _D47TypeInfo_S3std8datetime8timezone13TZConversions6__initZ@Base 12
+ _D47TypeInfo_S3std8internal14unicode_tables7scripts6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linux2fs17file_dedupe_range6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linux2fs17files_stat_struct6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5linuxQk5prctl12prctl_mm_map6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5posix6setjmp13__jmp_buf_tag6__initZ@Base 12
+ _D47TypeInfo_S4core3sys5posix7netinet3in_9ipv6_mreq6__initZ@Base 12
+ _D47TypeInfo_S4core6thread8osthread6Thread8Priority6__initZ@Base 12
+ _D47TypeInfo_S6object15TypeInfo_Struct11_memberFunc6__initZ@Base 12
+ _D47TypeInfo_xAS3std5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D47TypeInfo_xS3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D47TypeInfo_xS3std5regex8internal2ir__T5GroupTmZQj6__initZ@Base 12
+ _D47TypeInfo_xS3std5regex8internal2ir__T5RegexTaZQj6__initZ@Base 12
+ _D48TypeInfo_AxS3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D48TypeInfo_AxS3std5regex8internal2ir__T5GroupTmZQj6__initZ@Base 12
+ _D48TypeInfo_E3std4uuid20UUIDParsingException6Reason6__initZ@Base 12
+ _D48TypeInfo_S3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D48TypeInfo_S3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6__initZ@Base 12
+ _D48TypeInfo_S3std5range__T4iotaTmTxmZQlFmxmZ6Result6__initZ@Base 12
+ _D48TypeInfo_S3std5range__T6RepeatTaZQk11DollarToken6__initZ@Base 12
+ _D48TypeInfo_S3std6digest3sha__T3SHAVki1024Vki224ZQs6__initZ@Base 12
+ _D48TypeInfo_S3std6digest3sha__T3SHAVki1024Vki256ZQs6__initZ@Base 12
+ _D48TypeInfo_S3std6digest3sha__T3SHAVki1024Vki384ZQs6__initZ@Base 12
+ _D48TypeInfo_S3std6digest3sha__T3SHAVki1024Vki512ZQs6__initZ@Base 12
+ _D48TypeInfo_S3std8internal14unicode_tables8uniProps6__initZ@Base 12
+ _D48TypeInfo_S3std8internal4test10dummyrange7TestFoo6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5linux8io_uring12io_uring_cqe6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5linux8io_uring12io_uring_sqe6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5posix8ucontext12_libc_fpxreg6__initZ@Base 12
+ _D48TypeInfo_S4core3sys5posix8ucontext12_libc_xmmreg6__initZ@Base 12
+ _D48TypeInfo_xAS3std4file15DirIteratorImpl9DirHandle6__initZ@Base 12
+ _D48TypeInfo_xAS3std5regex8internal2ir__T5GroupTmZQj6__initZ@Base 12
+ _D49TypeInfo_E3std12experimental6logger4core8LogLevel6__initZ@Base 12
+ _D49TypeInfo_E3std8internal4test10dummyrange8ReturnBy6__initZ@Base 12
+ _D49TypeInfo_E3std8typecons24RefCountedAutoInitialize6__initZ@Base 12
+ _D49TypeInfo_E4core3sys5linux10perf_event10perf_hw_id6__initZ@Base 12
+ _D49TypeInfo_S3std12experimental6logger4core8MsgRange6__initZ@Base 12
+ _D49TypeInfo_S3std3uni18simpleCaseFoldingsFNfwZ5Range6__initZ@Base 12
+ _D49TypeInfo_S3std5range__T6ChunksTAhZQl11DollarToken6__initZ@Base 12
+ _D49TypeInfo_S3std8internal14unicode_tables9CompEntry6__initZ@Base 12
+ _D49TypeInfo_S3std8internal4math11biguintcore7BigUint6__initZ@Base 12
+ _D49TypeInfo_S3std8typecons__T5TupleTC8TypeInfoTPvZQv6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5linux3elf12Elf32_auxv_t5_a_un6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5linux3elf12Elf64_auxv_t5_a_un6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posix3net3if_14if_nameindex_t6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posix8ucontext13_libc_fpstate6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posixQk5types14pthread_attr_t6__initZ@Base 12
+ _D49TypeInfo_S4core3sys5posixQk5types14pthread_cond_t6__initZ@Base 12
+ _D49TypeInfo_xS3gcc6unwind7generic17_Unwind_Exception6__initZ@Base 12
+ _D49TypeInfo_xS3std3uni__T8CowArrayTSQwQu8GcPolicyZQz6__initZ@Base 12
+ _D4core10checkedint11__moduleRefZ@Base 12
+ _D4core10checkedint12__ModuleInfoZ@Base 12
+ _D4core2gc11gcinterface11__moduleRefZ@Base 12
+ _D4core2gc11gcinterface12__ModuleInfoZ@Base 12
+ _D4core2gc11gcinterface2GC11__InterfaceZ@Base 12
+ _D4core2gc11gcinterface4Root6__initZ@Base 12
+ _D4core2gc11gcinterface5Range11__xopEqualsMxFKxSQBuQBsQBsQBiZb@Base 12
+ _D4core2gc11gcinterface5Range6__initZ@Base 12
+ _D4core2gc11gcinterface5Range8opEqualsMxFNbMxSQBsQBqQBqQBgZb@Base 12
+ _D4core2gc11gcinterface5Range9__xtoHashFNbNeKxSQBtQBrQBrQBhZm@Base 12
+ _D4core2gc6config11PrettyBytes6__initZ@Base 12
+ _D4core2gc6config11__moduleRefZ@Base 12
+ _D4core2gc6config11prettyBytesFNaNbNiKmZa@Base 12
+ _D4core2gc6config12__ModuleInfoZ@Base 12
+ _D4core2gc6config18bytes2prettyStructFNaNbNimZSQBtQBrQBr11PrettyBytes@Base 12
+ _D4core2gc6config6Config10initializeMFNbNiZb@Base 12
+ _D4core2gc6config6Config11__xopEqualsMxFKxSQBpQBnQBnQBjZb@Base 12
+ _D4core2gc6config6Config4helpMFNbNiZv@Base 12
+ _D4core2gc6config6Config6__initZ@Base 12
+ _D4core2gc6config6Config9__xtoHashFNbNeKxSQBoQBmQBmQBiZm@Base 12
+ _D4core2gc6config6Config9errorNameMFNbNiZAya@Base 12
+ _D4core2gc6configQhSQsQpQo6Config@Base 12
+ _D4core2gc8registry11__moduleRefZ@Base 12
+ _D4core2gc8registry12__ModuleInfoZ@Base 12
+ _D4core2gc8registry16createGCInstanceFAyaZCQBpQBn11gcinterface2GC@Base 12
+ _D4core2gc8registry17registerGCFactoryFNbNiAyaPFZCQBwQBu11gcinterface2GCZv@Base 12
+ _D4core2gc8registry21registeredGCFactoriesFNbNiiZxASQByQBwQBw5Entry@Base 12
+ _D4core2gc8registry5Entry11__xopEqualsMxFKxSQBqQBoQBoQBiZb@Base 12
+ _D4core2gc8registry5Entry6__initZ@Base 12
+ _D4core2gc8registry5Entry9__xtoHashFNbNeKxSQBpQBnQBnQBhZm@Base 12
+ _D4core2gc8registry7entriesASQBbQzQy5Entry@Base 12
+ _D4core3sys5linux10perf_event11__moduleRefZ@Base 12
+ _D4core3sys5linux10perf_event12__ModuleInfoZ@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10exclude_hvMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10exclude_hvMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10namespacesMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10namespacesMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10precise_ipMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr10precise_ipMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr11use_clockidMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr11use_clockidMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12__reserved_1MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12__reserved_1MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_hostMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_hostMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_idleMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_idleMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_userMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12exclude_userMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12inherit_statMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr12inherit_statMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13exclude_guestMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13exclude_guestMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13sample_id_allMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr13sample_id_allMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14context_switchMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14context_switchMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14enable_on_execMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14enable_on_execMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14exclude_kernelMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14exclude_kernelMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14write_backwardMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr14write_backwardMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr22exclude_callchain_userMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr22exclude_callchain_userMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr24exclude_callchain_kernelMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr24exclude_callchain_kernelMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4commMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4commMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4freqMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4freqMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4mmapMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4mmapMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4taskMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr4taskMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr5mmap2MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr5mmap2MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6__initZ@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6pinnedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr6pinnedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr7inheritMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr7inheritMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr8disabledMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr8disabledMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9comm_execMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9comm_execMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9exclusiveMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9exclusiveMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9mmap_dataMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9mmap_dataMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9watermarkMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event15perf_event_attr9watermarkMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event15perf_event_type6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry4typeMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry4typeMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5abortMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5abortMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5in_txMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry5in_txMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6cyclesMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry6cyclesMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry7mispredMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry7mispredMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry8reservedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry8reservedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry9predictedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_branch_entry9predictedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_event_header6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_remoteMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_remoteMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_snoopxMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src10mem_snoopxMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src11mem_lvl_numMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src11mem_lvl_numMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6__initZ@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6mem_opMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src6mem_opMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src7mem_lvlMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src7mem_lvlMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_dtlbMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_dtlbMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_lockMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_lockMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_rsvdMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src8mem_rsvdMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src9mem_snoopMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event17perf_mem_data_src9mem_snoopMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event17perf_ns_link_info6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_ioc_flags6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page11cap_____resMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page11cap_____resMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page13cap_user_timeMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page13cap_user_timeMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page14cap_user_rdpmcMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page14cap_user_rdpmcMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page18cap_user_time_zeroMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page18cap_user_time_zeroMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page22cap_bit0_is_deprecatedMUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page22cap_bit0_is_deprecatedMxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page6__initZ@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page8cap_bit0MUNaNbNdNiNfmZv@Base 12
+ _D4core3sys5linux10perf_event20perf_event_mmap_page8cap_bit0MxUNaNbNdNiNfZm@Base 12
+ _D4core3sys5linux10perf_event22perf_callchain_context6__initZ@Base 12
+ _D4core3sys5linux10perf_event22perf_event_read_format6__initZ@Base 12
+ _D4core3sys5linux10perf_event23perf_branch_sample_type6__initZ@Base 12
+ _D4core3sys5linux10perf_event24perf_event_sample_format6__initZ@Base 12
+ _D4core3sys5linux2fs11__moduleRefZ@Base 12
+ _D4core3sys5linux2fs12__ModuleInfoZ@Base 12
+ _D4core3sys5linux2fs12fstrim_range6__initZ@Base 12
+ _D4core3sys5linux2fs13inodes_stat_t6__initZ@Base 12
+ _D4core3sys5linux2fs16file_clone_range6__initZ@Base 12
+ _D4core3sys5linux2fs17file_dedupe_range6__initZ@Base 12
+ _D4core3sys5linux2fs17files_stat_struct6__initZ@Base 12
+ _D4core3sys5linux2fs22file_dedupe_range_info6__initZ@Base 12
+ _D4core3sys5linux2fs7fsxattr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Ehdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Move6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Nhdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Phdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Rela6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf32_Shdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Ehdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Move6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Nhdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Phdr6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Rela6__initZ@Base 12
+ _D4core3sys5linux3elf10Elf64_Shdr6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab10_gt_header6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf32_gptab9_gt_entry6__initZ@Base 12
+ _D4core3sys5linux3elf11Elf_Options6__initZ@Base 12
+ _D4core3sys5linux3elf11__moduleRefZ@Base 12
+ _D4core3sys5linux3elf12Elf32_Verdef6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf32_auxv_t5_a_un6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf32_auxv_t6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_Verdef6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_auxv_t5_a_un6__initZ@Base 12
+ _D4core3sys5linux3elf12Elf64_auxv_t6__initZ@Base 12
+ _D4core3sys5linux3elf12__ModuleInfoZ@Base 12
+ _D4core3sys5linux3elf13Elf32_RegInfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Syminfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Verdaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Vernaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf32_Verneed6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Syminfo6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Verdaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Vernaux6__initZ@Base 12
+ _D4core3sys5linux3elf13Elf64_Verneed6__initZ@Base 12
+ _D4core3sys5linux3elf14Elf_Options_Hw6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Dyn5_d_un6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Dyn6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Lib6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Rel6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf32_Sym6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Dyn5_d_un6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Dyn6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Lib6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Rel6__initZ@Base 12
+ _D4core3sys5linux3elf9Elf64_Sym6__initZ@Base 12
+ _D4core3sys5linux3err11__moduleRefZ@Base 12
+ _D4core3sys5linux3err12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4link11__moduleRefZ@Base 12
+ _D4core3sys5linux4link12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4link12dl_phdr_info6__initZ@Base 12
+ _D4core3sys5linux4link7r_debug6__initZ@Base 12
+ _D4core3sys5linux4link8link_map6__initZ@Base 12
+ _D4core3sys5linux4time11__moduleRefZ@Base 12
+ _D4core3sys5linux4time12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4tipc10tipc_event6__initZ@Base 12
+ _D4core3sys5linux4tipc11__moduleRefZ@Base 12
+ _D4core3sys5linux4tipc11tipc_portid6__initZ@Base 12
+ _D4core3sys5linux4tipc11tipc_subscr6__initZ@Base 12
+ _D4core3sys5linux4tipc12__ModuleInfoZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc4Addr4Name6__initZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc4Addr6__initZ@Base 12
+ _D4core3sys5linux4tipc13sockaddr_tipc6__initZ@Base 12
+ _D4core3sys5linux4tipc13tipc_name_seq6__initZ@Base 12
+ _D4core3sys5linux4tipc9tipc_name6__initZ@Base 12
+ _D4core3sys5linux5dlfcn10Dl_serinfo6__initZ@Base 12
+ _D4core3sys5linux5dlfcn10Dl_serpath6__initZ@Base 12
+ _D4core3sys5linux5dlfcn11__moduleRefZ@Base 12
+ _D4core3sys5linux5dlfcn12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5epoll11__moduleRefZ@Base 12
+ _D4core3sys5linux5epoll11epoll_event6__initZ@Base 12
+ _D4core3sys5linux5epoll12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5epoll12epoll_data_t6__initZ@Base 12
+ _D4core3sys5linux5errno11__moduleRefZ@Base 12
+ _D4core3sys5linux5errno12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5fcntl11__moduleRefZ@Base 12
+ _D4core3sys5linux5fcntl12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5sched11__moduleRefZ@Base 12
+ _D4core3sys5linux5sched12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5sched9cpu_set_t6__initZ@Base 12
+ _D4core3sys5linux5stdio11__moduleRefZ@Base 12
+ _D4core3sys5linux5stdio12__ModuleInfoZ@Base 12
+ _D4core3sys5linux5stdio21cookie_io_functions_t6__initZ@Base 12
+ _D4core3sys5linux6config11__moduleRefZ@Base 12
+ _D4core3sys5linux6config12__ModuleInfoZ@Base 12
+ _D4core3sys5linux6string11__moduleRefZ@Base 12
+ _D4core3sys5linux6string12__ModuleInfoZ@Base 12
+ _D4core3sys5linux6unistd11__moduleRefZ@Base 12
+ _D4core3sys5linux6unistd12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7ifaddrs11__moduleRefZ@Base 12
+ _D4core3sys5linux7ifaddrs12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7ifaddrsQi6__initZ@Base 12
+ _D4core3sys5linux7netinet3in_11IN_BADCLASSFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_11__moduleRefZ@Base 12
+ _D4core3sys5linux7netinet3in_12IN_MULTICASTFNbNikZb@Base 12
+ _D4core3sys5linux7netinet3in_12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7netinet3in_15IN_EXPERIMENTALFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_18IN6_ARE_ADDR_EQUALFNaNbNiNfPSQCgQCe5posixQCdQBy8in6_addrQBdZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSAFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSBFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSCFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3in_9IN_CLASSDFNaNbNiNfkZb@Base 12
+ _D4core3sys5linux7netinet3tcp11__moduleRefZ@Base 12
+ _D4core3sys5linux7netinet3tcp12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7termios11__moduleRefZ@Base 12
+ _D4core3sys5linux7termios12__ModuleInfoZ@Base 12
+ _D4core3sys5linux7timerfd11__moduleRefZ@Base 12
+ _D4core3sys5linux7timerfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8execinfo11__moduleRefZ@Base 12
+ _D4core3sys5linux8execinfo12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8io_uring11__moduleRefZ@Base 12
+ _D4core3sys5linux8io_uring12__ModuleInfoZ@Base 12
+ _D4core3sys5linux8io_uring12io_uring_cqe6__initZ@Base 12
+ _D4core3sys5linux8io_uring12io_uring_sqe6__initZ@Base 12
+ _D4core3sys5linux8io_uring14io_uring_probe6__initZ@Base 12
+ _D4core3sys5linux8io_uring15io_uring_params6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_cqring_offsets6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_sqring_offsets6__initZ@Base 12
+ _D4core3sys5linux8io_uring17io_uring_probe_op6__initZ@Base 12
+ _D4core3sys5linux8io_uring20io_uring_restriction6__initZ@Base 12
+ _D4core3sys5linux8io_uring21io_uring_files_update6__initZ@Base 12
+ _D4core3sys5linux8io_uring22io_uring_getevents_arg6__initZ@Base 12
+ _D4core3sys5linuxQk4auxv11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4auxv12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4file11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4file12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4mman11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4mman12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4time10timerclearFNaNbNiNfPSQBtQBr5posixQCaQBr7timevalZv@Base 12
+ _D4core3sys5linuxQk4time10timerissetFNaNbNiNfPSQBtQBr5posixQCaQBr7timevalZi@Base 12
+ _D4core3sys5linuxQk4time11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk4time12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk4time8timeraddFNaNbNiNfxPSQBrQBp5posixQByQBp7timevalxQBdPSQCxQCvQBgQDbQCsQBdZv@Base 12
+ _D4core3sys5linuxQk4time8timersubFNaNbNiNfxPSQBrQBp5posixQByQBp7timevalxQBdPSQCxQCvQBgQDbQCsQBdZv@Base 12
+ _D4core3sys5linuxQk5prctl11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk5prctl12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk5prctl12prctl_mm_map6__initZ@Base 12
+ _D4core3sys5linuxQk5xattr11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk5xattr12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk6procfs11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk6procfs12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk6socket11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk6socket12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7eventfd11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7eventfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7inotify11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7inotify12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event6__initZ@Base 12
+ _D4core3sys5linuxQk7inotify13inotify_event8opAssignMFNaNbNcNiNjNeSQCmQCkQCjQCqQChQCcZQu@Base 12
+ _D4core3sys5linuxQk7sysinfo11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk7sysinfo12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk7sysinfo8sysinfo_6__initZ@Base 12
+ _D4core3sys5linuxQk8signalfd11__moduleRefZ@Base 12
+ _D4core3sys5linuxQk8signalfd12__ModuleInfoZ@Base 12
+ _D4core3sys5linuxQk8signalfd16signalfd_siginfo6__initZ@Base 12
+ _D4core3sys5posix3aio11__moduleRefZ@Base 12
+ _D4core3sys5posix3aio12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3aio5aiocb6__initZ@Base 12
+ _D4core3sys5posix3aio7aiocb646__initZ@Base 12
+ _D4core3sys5posix3grp11__moduleRefZ@Base 12
+ _D4core3sys5posix3grp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3grp5group6__initZ@Base 12
+ _D4core3sys5posix3net3if_11__moduleRefZ@Base 12
+ _D4core3sys5posix3net3if_12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3net3if_14if_nameindex_t6__initZ@Base 12
+ _D4core3sys5posix3pwd11__moduleRefZ@Base 12
+ _D4core3sys5posix3pwd12__ModuleInfoZ@Base 12
+ _D4core3sys5posix3pwd6passwd6__initZ@Base 12
+ _D4core3sys5posix4arpa4inet11__moduleRefZ@Base 12
+ _D4core3sys5posix4arpa4inet12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4arpa4inet7in_addr6__initZ@Base 12
+ _D4core3sys5posix4poll11__moduleRefZ@Base 12
+ _D4core3sys5posix4poll12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4poll6pollfd6__initZ@Base 12
+ _D4core3sys5posix4stdc4time11__moduleRefZ@Base 12
+ _D4core3sys5posix4stdc4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posix4stdc4time2tm6__initZ@Base 12
+ _D4core3sys5posix4time10itimerspec6__initZ@Base 12
+ _D4core3sys5posix4time11__moduleRefZ@Base 12
+ _D4core3sys5posix4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5dlfcn11__moduleRefZ@Base 12
+ _D4core3sys5posix5dlfcn12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5dlfcn7Dl_info6__initZ@Base 12
+ _D4core3sys5posix5fcntl11__moduleRefZ@Base 12
+ _D4core3sys5posix5fcntl12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5fcntl5flock6__initZ@Base 12
+ _D4core3sys5posix5iconv11__moduleRefZ@Base 12
+ _D4core3sys5posix5iconv12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5netdb11__moduleRefZ@Base 12
+ _D4core3sys5posix5netdb12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5netdb6netent6__initZ@Base 12
+ _D4core3sys5posix5netdb7hostent6__initZ@Base 12
+ _D4core3sys5posix5netdb7hostent6h_addrMUNdZPa@Base 12
+ _D4core3sys5posix5netdb7servent6__initZ@Base 12
+ _D4core3sys5posix5netdb8addrinfo6__initZ@Base 12
+ _D4core3sys5posix5netdb8protoent6__initZ@Base 12
+ _D4core3sys5posix5sched11__moduleRefZ@Base 12
+ _D4core3sys5posix5sched11sched_param6__initZ@Base 12
+ _D4core3sys5posix5sched12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5spawn11__moduleRefZ@Base 12
+ _D4core3sys5posix5spawn12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5spawn17posix_spawnattr_t6__initZ@Base 12
+ _D4core3sys5posix5spawn26posix_spawn_file_actions_t6__initZ@Base 12
+ _D4core3sys5posix5stdio11__moduleRefZ@Base 12
+ _D4core3sys5posix5stdio12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5utime11__moduleRefZ@Base 12
+ _D4core3sys5posix5utime12__ModuleInfoZ@Base 12
+ _D4core3sys5posix5utime7utimbuf6__initZ@Base 12
+ _D4core3sys5posix6config11__moduleRefZ@Base 12
+ _D4core3sys5posix6config12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6dirent11__moduleRefZ@Base 12
+ _D4core3sys5posix6dirent12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6dirent3DIR6__initZ@Base 12
+ _D4core3sys5posix6direntQh6__initZ@Base 12
+ _D4core3sys5posix6libgen11__moduleRefZ@Base 12
+ _D4core3sys5posix6libgen12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6locale11__moduleRefZ@Base 12
+ _D4core3sys5posix6locale12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6locale5lconv6__initZ@Base 12
+ _D4core3sys5posix6mqueue11__moduleRefZ@Base 12
+ _D4core3sys5posix6mqueue12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6mqueue7mq_attr6__initZ@Base 12
+ _D4core3sys5posix6setjmp11__moduleRefZ@Base 12
+ _D4core3sys5posix6setjmp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6setjmp13__jmp_buf_tag6__initZ@Base 12
+ _D4core3sys5posix6signal11__moduleRefZ@Base 12
+ _D4core3sys5posix6signal11sigaction_t6__initZ@Base 12
+ _D4core3sys5posix6signal12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6signal6sigval6__initZ@Base 12
+ _D4core3sys5posix6signal7stack_t6__initZ@Base 12
+ _D4core3sys5posix6signal8SIGRTMAXUNbNdNiZ3sigi@Base 12
+ _D4core3sys5posix6signal8SIGRTMINUNbNdNiZ3sigi@Base 12
+ _D4core3sys5posix6signal8sigevent6__initZ@Base 12
+ _D4core3sys5posix6signal8sigset_t6__initZ@Base 12
+ _D4core3sys5posix6signal8sigstack6__initZ@Base 12
+ _D4core3sys5posix6signal8timespec6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t10_sigpoll_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t11_sigchild_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t11_sigfault_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t5_rt_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t7_kill_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t11_sifields_t8_timer_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t6__initZ@Base 12
+ _D4core3sys5posix6signal9siginfo_t6si_pidMUNbNcNdNiNjZi@Base 12
+ _D4core3sys5posix6signal9siginfo_t6si_uidMUNbNcNdNiNjZk@Base 12
+ _D4core3sys5posix6signal9siginfo_t7si_addrMUNbNcNdNiNjZPv@Base 12
+ _D4core3sys5posix6signal9siginfo_t7si_bandMUNbNcNdNiNjZl@Base 12
+ _D4core3sys5posix6signal9siginfo_t8si_valueMUNbNcNdNiNjZSQCdQCbQCaQBx6sigval@Base 12
+ _D4core3sys5posix6signal9siginfo_t9si_statusMUNbNcNdNiNjZi@Base 12
+ _D4core3sys5posix6stdlib11__moduleRefZ@Base 12
+ _D4core3sys5posix6stdlib12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6string11__moduleRefZ@Base 12
+ _D4core3sys5posix6string12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6syslog11__moduleRefZ@Base 12
+ _D4core3sys5posix6syslog12__ModuleInfoZ@Base 12
+ _D4core3sys5posix6unistd11__moduleRefZ@Base 12
+ _D4core3sys5posix6unistd12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7netinet3in_11__moduleRefZ@Base 12
+ _D4core3sys5posix7netinet3in_11sockaddr_in6__initZ@Base 12
+ _D4core3sys5posix7netinet3in_12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7netinet3in_12sockaddr_in66__initZ@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_LOOPBACKFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_V4COMPATFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_20IN6_IS_ADDR_V4MAPPEDFNaNbNiPSQCgQCeQCdQCaQBv8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_LINKLOCALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_MC_GLOBALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_MULTICASTFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_21IN6_IS_ADDR_SITELOCALFNaNbNiPSQChQCfQCeQCbQBw8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_23IN6_IS_ADDR_MC_ORGLOCALFNaNbNiPSQCjQChQCgQCdQBy8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_23IN6_IS_ADDR_UNSPECIFIEDFNaNbNiPSQCjQChQCgQCdQBy8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_LINKLOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_NODELOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_24IN6_IS_ADDR_MC_SITELOCALFNaNbNiPSQCkQCiQChQCeQBz8in6_addrZi@Base 12
+ _D4core3sys5posix7netinet3in_8in6_addr6__initZ@Base 12
+ _D4core3sys5posix7netinet3in_9ipv6_mreq6__initZ@Base 12
+ _D4core3sys5posix7netinet3tcp11__moduleRefZ@Base 12
+ _D4core3sys5posix7netinet3tcp12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7pthread11__moduleRefZ@Base 12
+ _D4core3sys5posix7pthread12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup6__initZ@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup__T3popZQfMFNbiZv@Base 12
+ _D4core3sys5posix7pthread15pthread_cleanup__T4pushHTPUNaNbNiPvZvZQuMFNbNiQvQpZv@Base 12
+ _D4core3sys5posix7pthread23_pthread_cleanup_buffer6__initZ@Base 12
+ _D4core3sys5posix7strings11__moduleRefZ@Base 12
+ _D4core3sys5posix7strings12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7termios11__moduleRefZ@Base 12
+ _D4core3sys5posix7termios12__ModuleInfoZ@Base 12
+ _D4core3sys5posix7termiosQi6__initZ@Base 12
+ _D4core3sys5posix8inttypes11__moduleRefZ@Base 12
+ _D4core3sys5posix8inttypes12__ModuleInfoZ@Base 12
+ _D4core3sys5posix8ucontext10mcontext_t6__initZ@Base 12
+ _D4core3sys5posix8ucontext10ucontext_t6__initZ@Base 12
+ _D4core3sys5posix8ucontext11__moduleRefZ@Base 12
+ _D4core3sys5posix8ucontext12__ModuleInfoZ@Base 12
+ _D4core3sys5posix8ucontext12_libc_fpxreg6__initZ@Base 12
+ _D4core3sys5posix8ucontext12_libc_xmmreg6__initZ@Base 12
+ _D4core3sys5posix8ucontext13_libc_fpstate6__initZ@Base 12
+ _D4core3sys5posix9semaphore11__moduleRefZ@Base 12
+ _D4core3sys5posix9semaphore12__ModuleInfoZ@Base 12
+ _D4core3sys5posix9semaphore17_pthread_fastlock6__initZ@Base 12
+ _D4core3sys5posix9semaphore5sem_t6__initZ@Base 12
+ _D4core3sys5posixQk2un11__moduleRefZ@Base 12
+ _D4core3sys5posixQk2un11sockaddr_un6__initZ@Base 12
+ _D4core3sys5posixQk2un12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3ipc11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3ipc12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3ipc8ipc_perm6__initZ@Base 12
+ _D4core3sys5posixQk3msg11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3msg12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3msg6msgbuf6__initZ@Base 12
+ _D4core3sys5posixQk3msg7msginfo6__initZ@Base 12
+ _D4core3sys5posixQk3msg8msqid_ds6__initZ@Base 12
+ _D4core3sys5posixQk3shm11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3shm12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3shm8shmid_ds6__initZ@Base 12
+ _D4core3sys5posixQk3uio11__moduleRefZ@Base 12
+ _D4core3sys5posixQk3uio12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk3uio5iovec6__initZ@Base 12
+ _D4core3sys5posixQk4mman11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4mman12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4stat11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4stat12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4stat6stat_t6__initZ@Base 12
+ _D4core3sys5posixQk4stat7S_ISBLKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISCHRFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISDIRFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISLNKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat7S_ISREGFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISFIFOFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISSOCKFNbNikZb@Base 12
+ _D4core3sys5posixQk4stat8S_ISTYPEFNbNikkZb@Base 12
+ _D4core3sys5posixQk4time11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4time12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4time7timeval6__initZ@Base 12
+ _D4core3sys5posixQk4time9itimerval6__initZ@Base 12
+ _D4core3sys5posixQk4wait10WIFSTOPPEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk4wait10__WTERMSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait11WEXITSTATUSFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait11WIFSIGNALEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk4wait11__moduleRefZ@Base 12
+ _D4core3sys5posixQk4wait12WIFCONTINUEDFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk4wait8WSTOPSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait8WTERMSIGFNaNbNiNfiZi@Base 12
+ _D4core3sys5posixQk4wait9WIFEXITEDFNaNbNiNfiZb@Base 12
+ _D4core3sys5posixQk5filio11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5filio12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5ioctl11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5ioctl12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5ioctl3_IOFNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl6termio6__initZ@Base 12
+ _D4core3sys5posixQk5ioctl7_IOC_NRFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl7winsize6__initZ@Base 12
+ _D4core3sys5posixQk5ioctl8_IOC_DIRFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl8termios26__initZ@Base 12
+ _D4core3sys5posixQk5ioctl9_IOC_SIZEFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl9_IOC_TYPEFNbNiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTPaZQjFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTPmZQjFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTiZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTkZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTmZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOCTnZQiFNaNbNiiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTPmZQjFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTkZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IORTmZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTPaZQjFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTSQBhQBfQBeQBlQBc8termios2ZQBgFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTiZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTkZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5ioctl__T4_IOWTmZQiFNaNbNiiiZi@Base 12
+ _D4core3sys5posixQk5types11__moduleRefZ@Base 12
+ _D4core3sys5posixQk5types12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk5types14pthread_attr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types14pthread_cond_t6__initZ@Base 12
+ _D4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D4core3sys5posixQk5types16pthread_rwlock_t6__initZ@Base 12
+ _D4core3sys5posixQk5types17_pthread_fastlock6__initZ@Base 12
+ _D4core3sys5posixQk5types17pthread_barrier_t6__initZ@Base 12
+ _D4core3sys5posixQk5types18pthread_condattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types19pthread_mutexattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types20pthread_rwlockattr_t6__initZ@Base 12
+ _D4core3sys5posixQk5types21pthread_barrierattr_t6__initZ@Base 12
+ _D4core3sys5posixQk6ioccom11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6ioccom12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6select11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6select12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6select6FD_CLRFNaNbNiiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select6FD_SETFNaNbNiiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select6fd_set6__initZ@Base 12
+ _D4core3sys5posixQk6select7FD_ZEROFNaNbNiPSQBpQBnQBmQBtQBk6fd_setZv@Base 12
+ _D4core3sys5posixQk6select7__FDELTFNaNbNiiZk@Base 12
+ _D4core3sys5posixQk6select8FD_ISSETFNaNbNiiPxSQBsQBqQBpQBwQBn6fd_setZb@Base 12
+ _D4core3sys5posixQk6select8__FDMASKFNaNbNiiZl@Base 12
+ _D4core3sys5posixQk6socket10CMSG_ALIGNFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket10CMSG_SPACEFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket11CMSG_NXTHDRFNaNbNiPNgSQBwQBuQBtQCaQBr6msghdrPNgSQCwQCuQCtQDaQCr7cmsghdrZQBc@Base 12
+ _D4core3sys5posixQk6socket11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6socket12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk6socket13CMSG_FIRSTHDRFNaNbNiPNgSQByQBwQBvQCcQBt6msghdrZPNgSQCzQCxQCwQDdQCu7cmsghdr@Base 12
+ _D4core3sys5posixQk6socket16sockaddr_storage6__initZ@Base 12
+ _D4core3sys5posixQk6socket6linger6__initZ@Base 12
+ _D4core3sys5posixQk6socket6msghdr6__initZ@Base 12
+ _D4core3sys5posixQk6socket7cmsghdr6__initZ@Base 12
+ _D4core3sys5posixQk6socket8CMSG_LENFNaNbNimZm@Base 12
+ _D4core3sys5posixQk6socket8sockaddr6__initZ@Base 12
+ _D4core3sys5posixQk6socket9CMSG_DATAFNaNbNiNkMPNgSQBwQBuQBtQCaQBr7cmsghdrZPNgh@Base 12
+ _D4core3sys5posixQk6ttycom11__moduleRefZ@Base 12
+ _D4core3sys5posixQk6ttycom12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7statvfs11__moduleRefZ@Base 12
+ _D4core3sys5posixQk7statvfs12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7statvfs5FFlag6__initZ@Base 12
+ _D4core3sys5posixQk7statvfs9statvfs_t6__initZ@Base 12
+ _D4core3sys5posixQk7utsname11__moduleRefZ@Base 12
+ _D4core3sys5posixQk7utsname12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk7utsnameQi6__initZ@Base 12
+ _D4core3sys5posixQk8resource11__moduleRefZ@Base 12
+ _D4core3sys5posixQk8resource12__ModuleInfoZ@Base 12
+ _D4core3sys5posixQk8resource6rlimit6__initZ@Base 12
+ _D4core3sys5posixQk8resource6rusage6__initZ@Base 12
+ _D4core4math11__moduleRefZ@Base 12
+ _D4core4math12__ModuleInfoZ@Base 12
+ _D4core4simd11__moduleRefZ@Base 12
+ _D4core4simd12__ModuleInfoZ@Base 12
+ _D4core4stdc4fenv11__moduleRefZ@Base 12
+ _D4core4stdc4fenv12__ModuleInfoZ@Base 12
+ _D4core4stdc4fenv6fenv_t6__initZ@Base 12
+ _D4core4stdc4math11__moduleRefZ@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math11islessequalFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math11isunorderedFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math12__ModuleInfoZ@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math13islessgreaterFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math14isgreaterequalFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math6islessFNaNbNiNeffZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNedZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNeeZi@Base 12
+ _D4core4stdc4math8isnormalFNaNbNiNefZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeddZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeeeZi@Base 12
+ _D4core4stdc4math9isgreaterFNaNbNiNeffZi@Base 12
+ _D4core4stdc4time11__moduleRefZ@Base 12
+ _D4core4stdc4time12__ModuleInfoZ@Base 12
+ _D4core4stdc5ctype11__moduleRefZ@Base 12
+ _D4core4stdc5ctype12__ModuleInfoZ@Base 12
+ _D4core4stdc5errno11__moduleRefZ@Base 12
+ _D4core4stdc5errno12__ModuleInfoZ@Base 12
+ _D4core4stdc5stdio11__moduleRefZ@Base 12
+ _D4core4stdc5stdio12__ModuleInfoZ@Base 12
+ _D4core4stdc5stdio6fpos_t6__initZ@Base 12
+ _D4core4stdc5stdio8_IO_FILE6__initZ@Base 12
+ _D4core4stdc6config11__moduleRefZ@Base 12
+ _D4core4stdc6config12__ModuleInfoZ@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTdZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTdZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTdZQByZm@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTeZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTeZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTeZQByZm@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm11__xopEqualsMxFKxSQCbQBzQBx__TQBtTfZQBzZb@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm6__initZ@Base 12
+ _D4core4stdc6config__T8_ComplexTfZQm9__xtoHashFNbNeKxSQCaQByQBw__TQBsTfZQByZm@Base 12
+ _D4core4stdc6float_11__moduleRefZ@Base 12
+ _D4core4stdc6float_12__ModuleInfoZ@Base 12
+ _D4core4stdc6limits11__moduleRefZ@Base 12
+ _D4core4stdc6limits12__ModuleInfoZ@Base 12
+ _D4core4stdc6locale11__moduleRefZ@Base 12
+ _D4core4stdc6locale12__ModuleInfoZ@Base 12
+ _D4core4stdc6locale5lconv6__initZ@Base 12
+ _D4core4stdc6signal11__moduleRefZ@Base 12
+ _D4core4stdc6signal12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdarg11__moduleRefZ@Base 12
+ _D4core4stdc6stdarg12__ModuleInfoZ@Base 12
+ _D4core4stdc6stddef11__moduleRefZ@Base 12
+ _D4core4stdc6stddef12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdint11__moduleRefZ@Base 12
+ _D4core4stdc6stdint12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdint__T7_typifyTgZQlFNaNbNiNfgZg@Base 12
+ _D4core4stdc6stdint__T7_typifyThZQlFNaNbNiNfhZh@Base 12
+ _D4core4stdc6stdint__T7_typifyTiZQlFNaNbNiNfiZi@Base 12
+ _D4core4stdc6stdint__T7_typifyTkZQlFNaNbNiNfkZk@Base 12
+ _D4core4stdc6stdint__T7_typifyTlZQlFNaNbNiNflZl@Base 12
+ _D4core4stdc6stdint__T7_typifyTmZQlFNaNbNiNfmZm@Base 12
+ _D4core4stdc6stdint__T7_typifyTsZQlFNaNbNiNfsZs@Base 12
+ _D4core4stdc6stdint__T7_typifyTtZQlFNaNbNiNftZt@Base 12
+ _D4core4stdc6stdlib11__moduleRefZ@Base 12
+ _D4core4stdc6stdlib12__ModuleInfoZ@Base 12
+ _D4core4stdc6stdlib5div_t6__initZ@Base 12
+ _D4core4stdc6stdlib6ldiv_t6__initZ@Base 12
+ _D4core4stdc6stdlib7lldiv_t6__initZ@Base 12
+ _D4core4stdc6string11__moduleRefZ@Base 12
+ _D4core4stdc6string12__ModuleInfoZ@Base 12
+ _D4core4stdc6tgmath11__moduleRefZ@Base 12
+ _D4core4stdc6tgmath12__ModuleInfoZ@Base 12
+ _D4core4stdc6wchar_11__moduleRefZ@Base 12
+ _D4core4stdc6wchar_12__ModuleInfoZ@Base 12
+ _D4core4stdc6wchar_8getwcharFNbNiNeZw@Base 12
+ _D4core4stdc6wchar_8putwcharFNbNiNewZw@Base 12
+ _D4core4stdc6wchar_9mbstate_t6__initZ@Base 12
+ _D4core4stdc6wchar_9mbstate_t8___value6__initZ@Base 12
+ _D4core4stdc6wctype11__moduleRefZ@Base 12
+ _D4core4stdc6wctype12__ModuleInfoZ@Base 12
+ _D4core4stdc7assert_11__moduleRefZ@Base 12
+ _D4core4stdc7assert_12__ModuleInfoZ@Base 12
+ _D4core4stdc7complex11__moduleRefZ@Base 12
+ _D4core4stdc7complex12__ModuleInfoZ@Base 12
+ _D4core4stdc8inttypes11__moduleRefZ@Base 12
+ _D4core4stdc8inttypes12__ModuleInfoZ@Base 12
+ _D4core4stdc8inttypes9imaxdiv_t6__initZ@Base 12
+ _D4core4sync5event11__moduleRefZ@Base 12
+ _D4core4sync5event12__ModuleInfoZ@Base 12
+ _D4core4sync5event5Event10initializeMFNbNibbZv@Base 12
+ _D4core4sync5event5Event14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core4sync5event5Event3setMFNbNiZv@Base 12
+ _D4core4sync5event5Event4waitMFNbNiSQBi4time8DurationZb@Base 12
+ _D4core4sync5event5Event4waitMFNbNiZb@Base 12
+ _D4core4sync5event5Event5resetMFNbNiZv@Base 12
+ _D4core4sync5event5Event6__ctorMFNbNcNibbZSQBpQBnQBlQBi@Base 12
+ _D4core4sync5event5Event6__dtorMFNbNiZv@Base 12
+ _D4core4sync5event5Event6__initZ@Base 12
+ _D4core4sync5event5Event9terminateMFNbNiZv@Base 12
+ _D4core4sync5mutex11__moduleRefZ@Base 12
+ _D4core4sync5mutex12__ModuleInfoZ@Base 12
+ _D4core4sync5mutex5Mutex10handleAddrMFZPSQBn3sys5posixQk5types15pthread_mutex_t@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy11__xopEqualsMxFKxSQCdQCbQBzQBwQBtZb@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy6__initZ@Base 12
+ _D4core4sync5mutex5Mutex12MonitorProxy9__xtoHashFNbNeKxSQCcQCaQByQBvQBsZm@Base 12
+ _D4core4sync5mutex5Mutex4lockMFNeZv@Base 12
+ _D4core4sync5mutex5Mutex4lockMOFNeZv@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMFNbNiNeC6ObjectZCQBvQBtQBrQBo@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMFNbNiNeZCQBnQBlQBjQBg@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMOFNbNiNeC6ObjectZOCQBxQBvQBtQBq@Base 12
+ _D4core4sync5mutex5Mutex6__ctorMOFNbNiNeZOCQBpQBnQBlQBi@Base 12
+ _D4core4sync5mutex5Mutex6__dtorMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex6__initZ@Base 12
+ _D4core4sync5mutex5Mutex6__vtblZ@Base 12
+ _D4core4sync5mutex5Mutex6unlockMFNeZv@Base 12
+ _D4core4sync5mutex5Mutex6unlockMOFNeZv@Base 12
+ _D4core4sync5mutex5Mutex7__ClassZ@Base 12
+ _D4core4sync5mutex5Mutex7tryLockMFNeZb@Base 12
+ _D4core4sync5mutex5Mutex7tryLockMOFNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T12lock_nothrowTCQBpQBnQBlQBiZQBdMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T12lock_nothrowTOCQBqQBoQBmQBjZQBeMOFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T14unlock_nothrowTCQBrQBpQBnQBkZQBfMFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T14unlock_nothrowTOCQBsQBqQBoQBlZQBgMOFNbNiNeZv@Base 12
+ _D4core4sync5mutex5Mutex__T15tryLock_nothrowTCQBsQBqQBoQBlZQBgMFNbNiNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T15tryLock_nothrowTOCQBtQBrQBpQBmZQBhMOFNbNiNeZb@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTCQBiQBgQBeQBbZQwMFNbNiNeC6ObjectbZQBi@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTCQBiQBgQBeQBbZQwMFNbNiNebZQBa@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTOCQBjQBhQBfQBcZQxMOFNbNiNeC6ObjectbZOQBk@Base 12
+ _D4core4sync5mutex5Mutex__T6__ctorTOCQBjQBhQBfQBcZQxMOFNbNiNebZOQBc@Base 12
+ _D4core4sync6config11__moduleRefZ@Base 12
+ _D4core4sync6config12__ModuleInfoZ@Base 12
+ _D4core4sync6config7mktspecFNbNiKSQBg3sys5posix6signal8timespecSQCk4time8DurationZv@Base 12
+ _D4core4sync6config7mktspecFNbNiKSQBg3sys5posix6signal8timespecZv@Base 12
+ _D4core4sync6config7mvtspecFNbNiKSQBg3sys5posix6signal8timespecSQCk4time8DurationZv@Base 12
+ _D4core4sync7barrier11__moduleRefZ@Base 12
+ _D4core4sync7barrier12__ModuleInfoZ@Base 12
+ _D4core4sync7barrier7Barrier4waitMFZv@Base 12
+ _D4core4sync7barrier7Barrier6__ctorMFkZCQBmQBkQBiQBd@Base 12
+ _D4core4sync7barrier7Barrier6__initZ@Base 12
+ _D4core4sync7barrier7Barrier6__vtblZ@Base 12
+ _D4core4sync7barrier7Barrier7__ClassZ@Base 12
+ _D4core4sync7rwmutex11__moduleRefZ@Base 12
+ _D4core4sync7rwmutex12__ModuleInfoZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy11__xopEqualsMxFKxSQCwQCuQCsQCnQCaQBwZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy9__xtoHashFNbNeKxSQCvQCtQCrQCmQBzQBvZm@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7__ClassZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMFNeSQCc4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMOFNeSQCd4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader7tryLockMOFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T17shouldQueueReaderTCQCnQClQCjQCeQBrZQBlMFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T17shouldQueueReaderTOCQCoQCmQCkQCfQBsZQBmMOFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T6__ctorTCQCbQBzQBxQBsQBfZQzMFNaNbNiNeZQBe@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Reader__T6__ctorTOCQCcQCaQByQBtQBgZQBaMOFNaNbNiNeZOQBh@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy11__xopEqualsMxFKxSQCwQCuQCsQCnQCaQBwZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy9__xtoHashFNbNeKxSQCvQCtQCrQCmQBzQBvZm@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMOFNeZv@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7__ClassZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMFNeSQCc4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMOFNeSQCd4time8DurationZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer7tryLockMOFNeZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T17shouldQueueWriterTCQCnQClQCjQCeQBrZQBlMFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T17shouldQueueWriterTOCQCoQCmQCkQCfQBsZQBmMOFNaNbNdNiNfZb@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T6__ctorTCQCbQBzQBxQBsQBfZQzMFNaNbNiNeZQBe@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6Writer__T6__ctorTOCQCcQCaQByQBtQBgZQBaMOFNaNbNiNeZOQBh@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__ctorMFNbNfEQBwQBuQBsQBn6PolicyZCQCrQCpQCnQCi@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__ctorMOFNbNfEQBxQBvQBtQBo6PolicyZOCQCtQCrQCpQCk@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__initZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6__vtblZ@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6policyMFNbNdNfZEQBzQBxQBvQBq6Policy@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6policyMOFNbNdNfZEQCaQByQBwQBr6Policy@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6readerMFNbNdNfZCQBzQBxQBvQBq6Reader@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6readerMOFNbNdNfZOCQCbQBzQBxQBs6Reader@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6writerMFNbNdNfZCQBzQBxQBvQBq6Writer@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex6writerMOFNbNdNfZOCQCbQBzQBxQBs6Writer@Base 12
+ _D4core4sync7rwmutex14ReadWriteMutex7__ClassZ@Base 12
+ _D4core4sync9condition11__moduleRefZ@Base 12
+ _D4core4sync9condition12__ModuleInfoZ@Base 12
+ _D4core4sync9condition9Condition13mutex_nothrowMFNaNbNdNiNfZCQChQCf5mutex5Mutex@Base 12
+ _D4core4sync9condition9Condition13mutex_nothrowMOFNaNbNdNiNfZOCQCjQCh5mutex5Mutex@Base 12
+ _D4core4sync9condition9Condition4waitMFSQBm4time8DurationZb@Base 12
+ _D4core4sync9condition9Condition4waitMFZv@Base 12
+ _D4core4sync9condition9Condition4waitMOFSQBn4time8DurationZb@Base 12
+ _D4core4sync9condition9Condition4waitMOFZv@Base 12
+ _D4core4sync9condition9Condition5mutexMFNdZCQBqQBoQs5Mutex@Base 12
+ _D4core4sync9condition9Condition5mutexMOFNdZOCQBsQBqQu5Mutex@Base 12
+ _D4core4sync9condition9Condition6__ctorMFNbNfCQBsQBq5mutex5MutexZCQCmQCkQCiQCb@Base 12
+ _D4core4sync9condition9Condition6__ctorMOFNbNfOCQBuQBs5mutex5MutexZOCQCpQCnQClQCe@Base 12
+ _D4core4sync9condition9Condition6__dtorMFZv@Base 12
+ _D4core4sync9condition9Condition6__initZ@Base 12
+ _D4core4sync9condition9Condition6__vtblZ@Base 12
+ _D4core4sync9condition9Condition6notifyMFZv@Base 12
+ _D4core4sync9condition9Condition6notifyMOFZv@Base 12
+ _D4core4sync9condition9Condition7__ClassZ@Base 12
+ _D4core4sync9condition9Condition9notifyAllMFZv@Base 12
+ _D4core4sync9condition9Condition9notifyAllMOFZv@Base 12
+ _D4core4sync9condition9Condition__T4waitTCQBoQBmQBkQBdZQuMFSQCg4time8DurationbZb@Base 12
+ _D4core4sync9condition9Condition__T4waitTCQBoQBmQBkQBdZQuMFbZv@Base 12
+ _D4core4sync9condition9Condition__T4waitTOCQBpQBnQBlQBeZQvMOFSQCi4time8DurationbZb@Base 12
+ _D4core4sync9condition9Condition__T4waitTOCQBpQBnQBlQBeZQvMOFbZv@Base 12
+ _D4core4sync9condition9Condition__T6__ctorTCQBqQBoQBmQBfTCQCeQCc5mutex5MutexZQBqMFNbNeQBdbZQBw@Base 12
+ _D4core4sync9condition9Condition__T6__ctorTOCQBrQBpQBnQBgTOCQCgQCe5mutex5MutexZQBsMOFNbNeOQBfbZOQCa@Base 12
+ _D4core4sync9condition9Condition__T6notifyTCQBqQBoQBmQBfZQwMFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T6notifyTOCQBrQBpQBnQBgZQxMOFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T9notifyAllTCQBtQBrQBpQBiZQzMFNbbZv@Base 12
+ _D4core4sync9condition9Condition__T9notifyAllTOCQBuQBsQBqQBjZQBaMOFNbbZv@Base 12
+ _D4core4sync9exception11__moduleRefZ@Base 12
+ _D4core4sync9exception12__ModuleInfoZ@Base 12
+ _D4core4sync9exception9SyncError6__ctorMFNaNbNfAyaC6object9ThrowableQvmZCQCtQCrQCpQCi@Base 12
+ _D4core4sync9exception9SyncError6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCtQCrQCpQCi@Base 12
+ _D4core4sync9exception9SyncError6__initZ@Base 12
+ _D4core4sync9exception9SyncError6__vtblZ@Base 12
+ _D4core4sync9exception9SyncError7__ClassZ@Base 12
+ _D4core4sync9semaphore11__moduleRefZ@Base 12
+ _D4core4sync9semaphore12__ModuleInfoZ@Base 12
+ _D4core4sync9semaphore9Semaphore4waitMFSQBm4time8DurationZb@Base 12
+ _D4core4sync9semaphore9Semaphore4waitMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore6__ctorMFkZCQBqQBoQBmQBf@Base 12
+ _D4core4sync9semaphore9Semaphore6__dtorMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore6__initZ@Base 12
+ _D4core4sync9semaphore9Semaphore6__vtblZ@Base 12
+ _D4core4sync9semaphore9Semaphore6notifyMFZv@Base 12
+ _D4core4sync9semaphore9Semaphore7__ClassZ@Base 12
+ _D4core4sync9semaphore9Semaphore7tryWaitMFZb@Base 12
+ _D4core4time11__moduleRefZ@Base 12
+ _D4core4time11_posixClockFNaNbNiNfEQBhQBf9ClockTypeZi@Base 12
+ _D4core4time12TickDuration11ticksPerSecyl@Base 12
+ _D4core4time12TickDuration14currSystemTickFNbNdNiNeZSQBzQBxQBv@Base 12
+ _D4core4time12TickDuration27_sharedStaticCtor_L2825_C14FNeZv@Base 12
+ _D4core4time12TickDuration3maxFNaNbNdNiNfZSQBpQBnQBl@Base 12
+ _D4core4time12TickDuration3minFNaNbNdNiNfZSQBpQBnQBl@Base 12
+ _D4core4time12TickDuration4zeroFNaNbNdNiNfZSQBqQBoQBm@Base 12
+ _D4core4time12TickDuration5msecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration5nsecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration5opCmpMxFNaNbNiNfSQBqQBoQBmZi@Base 12
+ _D4core4time12TickDuration5usecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration6__ctorMFNaNbNcNiNflZSQBuQBsQBq@Base 12
+ _D4core4time12TickDuration6__initZ@Base 12
+ _D4core4time12TickDuration6hnsecsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration7secondsMxFNaNbNdNiNfZl@Base 12
+ _D4core4time12TickDuration8__xopCmpMxFKxSQBnQBlQBjZi@Base 12
+ _D4core4time12TickDuration9appOriginySQBkQBiQBg@Base 12
+ _D4core4time12__ModuleInfoZ@Base 12
+ _D4core4time12nsecsToTicksFNaNbNiNflZl@Base 12
+ _D4core4time12ticksToNSecsFNaNbNiNflZl@Base 12
+ _D4core4time13TimeException6__ctorMFNaNbNfAyaC6object9ThrowableQvmZCQCoQCmQCk@Base 12
+ _D4core4time13TimeException6__ctorMFNaNbNfAyaQdmC6object9ThrowableZCQCoQCmQCk@Base 12
+ _D4core4time13TimeException6__initZ@Base 12
+ _D4core4time13TimeException6__vtblZ@Base 12
+ _D4core4time13TimeException7__ClassZ@Base 12
+ _D4core4time13_clockTypeIdxFEQBbQz9ClockTypeZm@Base 12
+ _D4core4time13convClockFreqFNaNbNiNflllZl@Base 12
+ _D4core4time14_clockTypeNameFEQBcQBa9ClockTypeZAya@Base 12
+ _D4core4time15_ticksPerSecondyG8l@Base 12
+ _D4core4time25unitsAreInDescendingOrderFMAAyaZb@Base 12
+ _D4core4time3absFNaNbNiNfSQyQv12TickDurationZQu@Base 12
+ _D4core4time3absFNaNbNiNfSQyQv8DurationZQp@Base 12
+ _D4core4time4_absFNaNbNiNfdZd@Base 12
+ _D4core4time4_absFNaNbNiNflZl@Base 12
+ _D4core4time8Duration10isNegativeMxFNaNbNdNiNfZb@Base 12
+ _D4core4time8Duration3maxFNaNbNdNiNfZSQBkQBiQBg@Base 12
+ _D4core4time8Duration3minFNaNbNdNiNfZSQBkQBiQBg@Base 12
+ _D4core4time8Duration4zeroFNaNbNdNiNfZSQBlQBjQBh@Base 12
+ _D4core4time8Duration5opCmpMxFNaNbNiNfSQBlQBjQBhZi@Base 12
+ _D4core4time8Duration6__ctorMFNaNbNcNiNflZSQBpQBnQBl@Base 12
+ _D4core4time8Duration6__initZ@Base 12
+ _D4core4time8Duration8__xopCmpMxFKxSQBiQBgQBeZi@Base 12
+ _D4core4time8Duration8toStringMxFNaNbNfZAya@Base 12
+ _D4core4time8Duration__T10opOpAssignVAyaa1_2aZQwMFNaNbNcNiNflZSQCjQChQCf@Base 12
+ _D4core4time8Duration__T10opOpAssignVAyaa1_2bTSQBtQBrQBpZQBhMFNaNbNcNiNfxSQCuQCsQCqZQBm@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573ZQBsMxFNaNbNiNfZ12genSplitCallFNaNbNfZQCw@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573ZQBsMxFNaNbNiNfZ14genMemberDeclsFNaNbNfZQCy@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573ZQBsMxFNaNbNiNfZSQDeQDcQDa__TQCuVQCra5_686f757273VQDia7_6d696e75746573ZQEkMxFNaNbNiNfZ10SplitUnits@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573Z__TQBvTiTiZQCdMxFNaNbNiNfJiJiZv@Base 12
+ _D4core4time8Duration__T5splitVAyaa5_686f757273VQra7_6d696e75746573Z__TQBvTlTlZQCdMxFNaNbNiNfJlJlZv@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ12genSplitCallFNaNbNfZQCw@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZ14genMemberDeclsFNaNbNfZQCy@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373ZQBsMxFNaNbNiNfZSQDeQDcQDa__TQCuVQCra7_7365636f6e6473VQDma5_6e73656373ZQEkMxFNaNbNiNfZ10SplitUnits@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_6e73656373Z__TQBvTlTlZQCdMxFNaNbNiNfJlJlZv@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373ZQBsMxFNaNbNiNfZ10SplitUnits6__initZ@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373ZQBsMxFNaNbNiNfZ12genSplitCallFNaNbNfZQCw@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373ZQBsMxFNaNbNiNfZ14genMemberDeclsFNaNbNfZQCy@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373ZQBsMxFNaNbNiNfZSQDeQDcQDa__TQCuVQCra7_7365636f6e6473VQDma5_7573656373ZQEkMxFNaNbNiNfZ10SplitUnits@Base 12
+ _D4core4time8Duration__T5splitVAyaa7_7365636f6e6473VQva5_7573656373Z__TQBvTlTlZQCdMxFNaNbNiNfJlJlZv@Base 12
+ _D4core4time8Duration__T5totalVAyaa5_6d73656373ZQyMxFNaNbNdNiNfZl@Base 12
+ _D4core4time8Duration__T5totalVAyaa6_686e73656373ZQBaMxFNaNbNdNiNfZl@Base 12
+ _D4core4time8Duration__T5totalVAyaa7_7365636f6e6473ZQBcMxFNaNbNdNiNfZl@Base 12
+ _D4core4time8Duration__T8opBinaryVAyaa1_2bTSQBqQBoQBmZQBeMxFNaNbNiNfQzZQBc@Base 12
+ _D4core4time8Duration__T8opBinaryVAyaa1_2bTySQBrQBpQBnZQBfMxFNaNbNiNfyQBaZSQCvQCtQCr@Base 12
+ _D4core4time8Duration__T8opBinaryVAyaa1_2dTxSQBrQBpQBnZQBfMxFNaNbNiNfxQBaZSQCvQCtQCr@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ10appListSepFNaNbNfQBqkbZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ5unitsyAAa@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa4_64617973ZQBcFNaNbNfQCmlZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_686f757273ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_6d73656373ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_7573656373ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa5_7765656b73ZQBeFNaNbNfQColZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa6_686e73656373ZQBgFNaNbNfQCqlZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa7_6d696e75746573ZQBiFNaNbNfQCslZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFMQuZ__T10appUnitValVAyaa7_7365636f6e6473ZQBiFNaNbNfQCslZv@Base 12
+ _D4core4time8Duration__T8toStringTDFNaNbNfIAaZvZQyMxFNaNbNfMQBaZv@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj14ticksPerSecondFNaNbNdNiNfZl@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj3maxFNaNbNdNiNfZSQCqQCo__TQCmVQCbi0ZQCw@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj3minFNaNbNdNiNfZSQCqQCo__TQCmVQCbi0ZQCw@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj4zeroFNaNbNdNiNfZSQCrQCp__TQCnVQCci0ZQCx@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj5opCmpMxFNaNbNiNfSQCrQCp__TQCnVQCci0ZQCxZi@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj5ticksMxFNaNbNdNiNfZl@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj6__initZ@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8__xopCmpMxFKxSQCoQCm__TQCkVQBzi0ZQCuZi@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8currTimeFNbNdNiNeZSQCtQCr__TQCpVQCei0ZQCz@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj8toStringMxFNaNbNfZAya@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj__T8opBinaryVAyaa1_2bZQtMxFNaNbNiNfSQDjQDh8DurationZSQEaQDy__TQDwVQDli0ZQEg@Base 12
+ _D4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj__T8opBinaryVAyaa1_2dZQtMxFNaNbNiNfSQDjQDh__TQDfVQCui0ZQDpZSQEhQEf8Duration@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa4_64617973ZQBmFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_686f757273ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_6d73656373ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_7573656373ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa5_7765656b73ZQBoFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa7_6d696e75746573ZQBsFNaNbNiNfKlZl@Base 12
+ _D4core4time__T20splitUnitsFromHNSecsVAyaa7_7365636f6e6473ZQBsFNaNbNiNfKlZl@Base 12
+ _D4core4time__T2toVAyaa5_6d73656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa5_6e73656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa5_7573656373TlTxSQBmQBk12TickDurationZQBuFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa6_686e73656373TlTxSQBoQBm12TickDurationZQBwFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T2toVAyaa7_7365636f6e6473TlTxSQBqQBo12TickDurationZQByFNaNbNiNfxQBjZl@Base 12
+ _D4core4time__T3durVAyaa4_64617973ZQuFNaNbNiNflZSQBvQBt8Duration@Base 12
+ _D4core4time__T3durVAyaa5_686f757273ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_6d73656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_6e73656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_7573656373ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa5_7765656b73ZQwFNaNbNiNflZSQBxQBv8Duration@Base 12
+ _D4core4time__T3durVAyaa6_686e73656373ZQyFNaNbNiNflZSQBzQBx8Duration@Base 12
+ _D4core4time__T3durVAyaa7_6d696e75746573ZQBaFNaNbNiNflZSQCcQCa8Duration@Base 12
+ _D4core4time__T3durVAyaa7_7365636f6e6473ZQBaFNaNbNiNflZSQCcQCa8Duration@Base 12
+ _D4core4time__T7convertVAyaa4_64617973VQpa6_686e73656373ZQBqFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa4_64617973VQpa7_7365636f6e6473ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_686f757273VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_686f757273VQra7_7365636f6e6473ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_6d73656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_6e73656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_7573656373VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa5_7765656b73VQra6_686e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta4_64617973ZQBqFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_686f757273ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_6d73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_6e73656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_7573656373ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta5_7765656b73ZQBsFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta6_686e73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta7_6d696e75746573ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa6_686e73656373VQta7_7365636f6e6473ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_6d696e75746573VQva6_686e73656373ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_6d73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_6e73656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva5_7573656373ZQBuFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva6_686e73656373ZQBwFNaNbNiNflZl@Base 12
+ _D4core4time__T7convertVAyaa7_7365636f6e6473VQva7_7365636f6e6473ZQByFNaNbNiNflZl@Base 12
+ _D4core5bitop11__moduleRefZ@Base 12
+ _D4core5bitop12__ModuleInfoZ@Base 12
+ _D4core5bitop2btFNaNbNiMxPmmZi@Base 12
+ _D4core5bitop3bsfFNaNbNiNfkZi@Base 12
+ _D4core5bitop3bsfFNaNbNiNfmZi@Base 12
+ _D4core5bitop3bsrFNaNbNiNfkZi@Base 12
+ _D4core5bitop3bsrFNaNbNiNfmZi@Base 12
+ _D4core5bitop6popcntFNaNbNiNfkZi@Base 12
+ _D4core5bitop6popcntFNaNbNiNfmZi@Base 12
+ _D4core5bitop7Split646__ctorMFNaNbNcNiNfmZSQBpQBnQBk@Base 12
+ _D4core5bitop7Split646__initZ@Base 12
+ _D4core5bitop7bitswapFNaNbNiNfkZk@Base 12
+ _D4core5bitop7bitswapFNaNbNiNfmZm@Base 12
+ _D4core5bitop8BitRange5emptyMxFNaNbNiNfZb@Base 12
+ _D4core5bitop8BitRange5frontMFNaNbNiNfZm@Base 12
+ _D4core5bitop8BitRange6__ctorMFNaNbNcNiPxmmZSQBrQBpQBm@Base 12
+ _D4core5bitop8BitRange6__initZ@Base 12
+ _D4core5bitop8BitRange8popFrontMFNaNbNiZv@Base 12
+ _D4core5bitop8byteswapFNaNbNiNftZt@Base 12
+ _D4core5bitop__T10softPopcntTkZQpFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T10softPopcntTmZQpFNaNbNiNfmZi@Base 12
+ _D4core5bitop__T11softBitswapTkZQqFNaNbNiNfkZk@Base 12
+ _D4core5bitop__T11softBitswapTmZQqFNaNbNiNfmZm@Base 12
+ _D4core5bitop__T3rolTkZQhFNaNbNiNfxkxkZk@Base 12
+ _D4core5bitop__T3rorTkZQhFNaNbNiNfxkxkZk@Base 12
+ _D4core5bitop__T3rorTmZQhFNaNbNiNfxmxkZm@Base 12
+ _D4core5bitop__T8softScanTkVbi0ZQqFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T8softScanTkVbi1ZQqFNaNbNiNfkZi@Base 12
+ _D4core5bitop__T8softScanTmVbi0ZQqFNaNbNiNfmZi@Base 12
+ _D4core5bitop__T8softScanTmVbi1ZQqFNaNbNiNfmZi@Base 12
+ _D4core5cpuid10_hasPopcntyb@Base 12
+ _D4core5cpuid10_hasRdrandyb@Base 12
+ _D4core5cpuid10_hasRdseedyb@Base 12
+ _D4core5cpuid10_isItaniumyb@Base 12
+ _D4core5cpuid10_processoryAa@Base 12
+ _D4core5cpuid10_x87onChipyb@Base 12
+ _D4core5cpuid10dataCachesFNaNbNdNiNeZxG5SQBnQBl9CacheInfo@Base 12
+ _D4core5cpuid11CpuFeatures11__xopEqualsMxFKxSQBrQBpQBmZb@Base 12
+ _D4core5cpuid11CpuFeatures6__initZ@Base 12
+ _D4core5cpuid11CpuFeatures9__xtoHashFNbNeKxSQBqQBoQBlZm@Base 12
+ _D4core5cpuid11__moduleRefZ@Base 12
+ _D4core5cpuid11_dataCachesyG5SQBcQBa9CacheInfo@Base 12
+ _D4core5cpuid11amd3dnowExtFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid11cacheLevelsFNbNdNiNeZk@Base 12
+ _D4core5cpuid11coresPerCPUFNaNbNdNiNeZk@Base 12
+ _D4core5cpuid11cpuFeaturesSQzQw11CpuFeatures@Base 12
+ _D4core5cpuid11hasLahfSahfFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12__ModuleInfoZ@Base 12
+ _D4core5cpuid12_amd3dnowExtyb@Base 12
+ _D4core5cpuid12_coresPerCPUyk@Base 12
+ _D4core5cpuid12_hasLahfSahfyb@Base 12
+ _D4core5cpuid12getCpuInfo0BFNbNiNeZv@Base 12
+ _D4core5cpuid12hasCmpxchg8bFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12hasPclmulqdqFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid12preferAthlonFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13_hasCmpxchg8byb@Base 12
+ _D4core5cpuid13_hasPclmulqdqyb@Base 12
+ _D4core5cpuid13_preferAthlonyb@Base 12
+ _D4core5cpuid13hasCmpxchg16bFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13hasVpclmulqdqFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid13threadsPerCPUFNaNbNdNiNeZk@Base 12
+ _D4core5cpuid14_hasCmpxchg16byb@Base 12
+ _D4core5cpuid14_hasVpclmulqdqyb@Base 12
+ _D4core5cpuid14_threadsPerCPUyk@Base 12
+ _D4core5cpuid14getCpuFeaturesFNbNiNeZPSQBlQBj11CpuFeatures@Base 12
+ _D4core5cpuid14hyperThreadingFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid14numCacheLevelsk@Base 12
+ _D4core5cpuid14preferPentium1FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid14preferPentium4FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid15_hyperThreadingyb@Base 12
+ _D4core5cpuid15_preferPentium1yb@Base 12
+ _D4core5cpuid15_preferPentium4yb@Base 12
+ _D4core5cpuid15getAMDcacheinfoFNbNiNeZ8assocmapyAh@Base 12
+ _D4core5cpuid15getAMDcacheinfoFNbNiNeZv@Base 12
+ _D4core5cpuid16has3dnowPrefetchFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid17_has3dnowPrefetchyb@Base 12
+ _D4core5cpuid17hyperThreadingBitFNbNdNiNeZb@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ3idsyG63h@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ4waysyG63h@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZ14decipherCpuid2MFNbNihZ5sizesyG63k@Base 12
+ _D4core5cpuid18getcacheinfoCPUID2FNbNiNeZv@Base 12
+ _D4core5cpuid18getcacheinfoCPUID4FNbNiNeZv@Base 12
+ _D4core5cpuid18hasSysEnterSysExitFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid18max_extended_cpuidk@Base 12
+ _D4core5cpuid19_hasSysEnterSysExityb@Base 12
+ _D4core5cpuid26_sharedStaticCtor_L1068_C1FNbNiNeZv@Base 12
+ _D4core5cpuid3aesFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3avxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3fmaFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3hleFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3mmxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3rtmFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid3sseFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4_aesyb@Base 12
+ _D4core5cpuid4_avxyb@Base 12
+ _D4core5cpuid4_fmayb@Base 12
+ _D4core5cpuid4_hleyb@Base 12
+ _D4core5cpuid4_mmxyb@Base 12
+ _D4core5cpuid4_rtmyb@Base 12
+ _D4core5cpuid4_sseyb@Base 12
+ _D4core5cpuid4avx2FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4sse2FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4sse3FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid4vaesFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5_avx2yb@Base 12
+ _D4core5cpuid5_sse2yb@Base 12
+ _D4core5cpuid5_sse3yb@Base 12
+ _D4core5cpuid5_vaesyb@Base 12
+ _D4core5cpuid5fp16cFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5modelk@Base 12
+ _D4core5cpuid5sse41FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5sse42FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5sse4aFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid5ssse3FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6_fp16cyb@Base 12
+ _D4core5cpuid6_sse41yb@Base 12
+ _D4core5cpuid6_sse42yb@Base 12
+ _D4core5cpuid6_sse4ayb@Base 12
+ _D4core5cpuid6_ssse3yb@Base 12
+ _D4core5cpuid6amdMmxFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6familyk@Base 12
+ _D4core5cpuid6hasShaFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid6vendorFNaNbNdNiNeZAya@Base 12
+ _D4core5cpuid7_amdMmxyb@Base 12
+ _D4core5cpuid7_hasShayb@Base 12
+ _D4core5cpuid7_vendoryAa@Base 12
+ _D4core5cpuid7hasCmovFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid7hasFxsrFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8_hasCmovyb@Base 12
+ _D4core5cpuid8_hasFxsryb@Base 12
+ _D4core5cpuid8amd3dnowFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8cpuidX86FNbNiNeZv@Base 12
+ _D4core5cpuid8hasCPUIDFNbNiNeZb@Base 12
+ _D4core5cpuid8hasLzcntFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8hasRdtscFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8isX86_64FNaNbNdNiNeZb@Base 12
+ _D4core5cpuid8steppingk@Base 12
+ _D4core5cpuid9CacheInfo6__initZ@Base 12
+ _D4core5cpuid9_amd3dnowyb@Base 12
+ _D4core5cpuid9_hasLzcntyb@Base 12
+ _D4core5cpuid9_hasRdtscyb@Base 12
+ _D4core5cpuid9_isX86_64yb@Base 12
+ _D4core5cpuid9datacacheG5SQyQv9CacheInfo@Base 12
+ _D4core5cpuid9hasPopcntFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9hasRdrandFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9hasRdseedFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9isItaniumFNaNbNdNiNeZb@Base 12
+ _D4core5cpuid9max_cpuidk@Base 12
+ _D4core5cpuid9processorFNaNbNdNiNeZAya@Base 12
+ _D4core5cpuid9x87onChipFNaNbNdNiNeZb@Base 12
+ _D4core6atomic11__moduleRefZ@Base 12
+ _D4core6atomic12__ModuleInfoZ@Base 12
+ _D4core6atomic5pauseFNaNbNiNfZv@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKOxPSQCcQCcQBvZQCp@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKxPOxSQCdQCdQBwZQCq@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TbZQBmFNaNbNiNeKOxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TbZQBmFNaNbNiNeKxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TmZQBmFNaNbNiNeKOxmZm@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi0TmZQBmFNaNbNiNeKxmZm@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TC3std12experimental6loggerQCz6LoggerZQCvFNaNbNiNeKOxCQCaQBzQBoQEjQBkZOCQCsQCrQCgQFbQCc@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TCQCa4sync5mutex5MutexZQCgFNaNbNiNeKOxCQDlQBlQBjQBgZOCQEaQCaQByQBv@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TE3std12experimental6loggerQCz8LogLevelZQCxFNaNbNiNeKOxEQCcQCbQBqQElQBmZQCt@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TE3std12experimental6loggerQCz8LogLevelZQCxFNaNbNiNeKxEQCbQCaQBpQEkQBlZQCs@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNeNkMKOxPSQBpQBpQBjZQCc@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNeNkMKxPOxSQBqQBqQBkZQCd@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKOxPSQCcQCcQBvZQCp@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNeNkMKxPOxSQCdQCdQBwZQCq@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TbZQBmFNaNbNiNeKOxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TbZQBmFNaNbNiNeKxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TkZQBmFNaNbNiNeKOxkZk@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi2TkZQBmFNaNbNiNeKxkZk@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateZQCwFNaNbNiNeKOxEQCbQCaQBqQBkZQCp@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateZQCwFNaNbNiNeKxEQCaQBzQBpQBjZQCo@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TbZQBmFNaNbNiNeKOxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TbZQBmFNaNbNiNeKxbZb@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5ThZQBmFNaNbNiNeKOxhZh@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5ThZQBmFNaNbNiNeKxhZh@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TiZQBmFNaNbNiNeKOxiZi@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TiZQBmFNaNbNiNeKxiZi@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TkZQBmFNaNbNiNeKOxkZk@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TkZQBmFNaNbNiNeKxkZk@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TmZQBmFNaNbNiNeKOxmZm@Base 12
+ _D4core6atomic__T10atomicLoadVEQBdQBb11MemoryOrderi5TmZQBmFNaNbNiNeKxmZm@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TbTbZQBpFNaNbNiNeKObbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TbTbZQBpFNaNbNiNeKbbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TmTiZQBpFNaNbNiNeKOmiZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi0TmTiZQBpFNaNbNiNeKmiZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TC3std12experimental6loggerQDa6LoggerTQBlZQDaFNaNbNiNeKOCQCdQCcQBrQEnQBnOQrZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TE3std12experimental6loggerQDa8LogLevelTQBnZQDcFNaNbNiNeKOEQCfQCeQBtQEpQBpQCvZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TE3std12experimental6loggerQDa8LogLevelTQBnZQDcFNaNbNiNeKQCeQChZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorTQyZQCmFNaNbNiNeKOPQBoQBtZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorTQyZQCmFNaNbNiNeKQBoQBrZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONTQBlZQDaFNaNbNiNeKOPQCcQChZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONTQBlZQDaFNaNbNiNeKQCcQCfZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TbTbZQBpFNaNbNiNeKObbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TbTbZQBpFNaNbNiNeKbbZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TmTmZQBpFNaNbNiNeKOmmZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi3TmTmZQBpFNaNbNiNeKmmZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateTQBmZQDbFNaNbNiNeKOEQCeQCdQBtQBnQCrZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateTQBmZQDbFNaNbNiNeKQCdQCgZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5ThThZQBpFNaNbNiNeKOhhZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5ThThZQBpFNaNbNiNeKhhZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TkTkZQBpFNaNbNiNeKOkkZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TkTkZQBpFNaNbNiNeKkkZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TmTmZQBpFNaNbNiNeKOmmZv@Base 12
+ _D4core6atomic__T11atomicStoreVEQBeQBc11MemoryOrderi5TmTmZQBpFNaNbNiNeKmmZv@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKOkmZk@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKkmZk@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKOmmZm@Base 12
+ _D4core6atomic__T14atomicFetchAddVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKmmZm@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKOkmZk@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TkZQBqFNaNbNiNeKkmZk@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKOmmZm@Base 12
+ _D4core6atomic__T14atomicFetchSubVEQBhQBf11MemoryOrderi5TmZQBqFNaNbNiNeKmmZm@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTE3std11parallelism8TaskPool9PoolStateZQCpFNaNbNiNfPQBzZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOCQBu4sync5mutex5MutexZQCaFNaNbNiNfPOQBkZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOE3std11parallelism8TaskPool9PoolStateZQCqFNaNbNiNfPOQCaZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTObZQBgFNaNbNiNfPObZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOhZQBgFNaNbNiNfPOhZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOkZQBgFNaNbNiNfPOkZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOmZQBgFNaNbNiNfPOmZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTOtZQBgFNaNbNiNfPOtZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTbZQBfFNaNbNiNfPbZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedThZQBfFNaNbNiNfPhZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTkZQBfFNaNbNiNfPkZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTmZQBfFNaNbNiNfPmZb@Base 12
+ _D4core6atomic__T26atomicPtrIsProperlyAlignedTtZQBfFNaNbNiNfPtZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTOkZQBiFNaNbNiNeKOkZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTOmZQBiFNaNbNiNeKOmZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTkZQBhFNaNbNiNeKkZb@Base 12
+ _D4core6atomic__T28atomicValueIsProperlyAlignedTmZQBhFNaNbNiNeKmZb@Base 12
+ _D4core6atomic__T3casZ__TQiTCQBb4sync5mutex5MutexTnTQyZQBmFNaNbNiNePOCQCqQBpQBnQBkOnOQqZb@Base 12
+ _D4core6atomic__T3casZ__TQiTE3std11parallelism8TaskPool9PoolStateTQBmTQBqZQCfFNaNbNiNePOEQCiQChQBxQBrQCvQCyZb@Base 12
+ _D4core6atomic__T3casZ__TQiTE3std11parallelism8TaskPool9PoolStateTQBmTQBqZQCfFNaNbNiNePQChQCkQCnZb@Base 12
+ _D4core6atomic__T3casZ__TQiTbTbTbZQrFNaNbNiNePObbbZb@Base 12
+ _D4core6atomic__T3casZ__TQiTbTbTbZQrFNaNbNiNePbbbZb@Base 12
+ _D4core6atomic__T3casZ__TQiThThThZQrFNaNbNiNePOhhhZb@Base 12
+ _D4core6atomic__T3casZ__TQiThThThZQrFNaNbNiNePhhhZb@Base 12
+ _D4core6atomic__T3casZ__TQiTmTmTmZQrFNaNbNiNePOmmmZb@Base 12
+ _D4core6atomic__T3casZ__TQiTmTmTmZQrFNaNbNiNePmmmZb@Base 12
+ _D4core6atomic__T3casZ__TQiTtTtTtZQrFNaNbNiNePOtttZb@Base 12
+ _D4core6atomic__T3casZ__TQiTtTtTtZQrFNaNbNiNePtttZb@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTkTiZQzFNaNbNiNfKOkiZk@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTkTkZQzFNaNbNiNfKOkkZk@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTmTiZQzFNaNbNiNfKOmiZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2b3dTmTmZQzFNaNbNiNfKOmmZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTkTiZQzFNaNbNiNfKOkiZk@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTmTiZQzFNaNbNiNfKOmiZm@Base 12
+ _D4core6atomic__T8atomicOpVAyaa2_2d3dTmTmZQzFNaNbNiNfKOmmZm@Base 12
+ _D4core6int12811__moduleRefZ@Base 12
+ _D4core6int12812__ModuleInfoZ@Base 12
+ _D4core6int1282geFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282gtFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282leFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282ltFNaNbNiNfSQzQw4CentQkZb@Base 12
+ _D4core6int1282orFNaNbNiNfSQzQw4CentQkZQn@Base 12
+ _D4core6int1283addFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283andFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283comFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283decFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283divFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283incFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283mulFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283negFNaNbNiNfSQBaQy4CentZQm@Base 12
+ _D4core6int1283rolFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283rorFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283sarFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283shlFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283shrFNaNbNiNfSQBaQy4CentkZQn@Base 12
+ _D4core6int1283subFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1283tstFNaNbNiNfSQBaQy4CentZb@Base 12
+ _D4core6int1283ugeFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283ugtFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283uleFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283ultFNaNbNiNfSQBaQy4CentQlZb@Base 12
+ _D4core6int1283xorFNaNbNiNfSQBaQy4CentQlZQo@Base 12
+ _D4core6int1284Cent6__initZ@Base 12
+ _D4core6int1284rol1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284ror1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284sar1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284shl1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284shr1FNaNbNiNfSQBbQz4CentZQm@Base 12
+ _D4core6int1284udivFNaNbNiNfSQBbQz4CentQlZQo@Base 12
+ _D4core6int1286divmodFNaNbNiNfSQBdQBb4CentQmJQpZQs@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZ13udivmod128_64FNaNbNiNfQBqmJmZm@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZ13udivmod128_64FQBimJmZ9udiv96_64FNaNbNiNfmkmZk@Base 12
+ _D4core6int1287udivmodFNaNbNiNfSQBeQBc4CentQmJQpZQs@Base 12
+ _D4core6memory10initialize@Base 12
+ _D4core6memory11__moduleRefZ@Base 12
+ _D4core6memory12__ModuleInfoZ@Base 12
+ _D4core6memory2GC12ProfileStats6__initZ@Base 12
+ _D4core6memory2GC12profileStatsFNbNiNfZSQBmQBkQBg12ProfileStats@Base 12
+ _D4core6memory2GC5Stats6__initZ@Base 12
+ _D4core6memory2GC5queryFNaNbNkMPvZSQBhQBf8BlkInfo_@Base 12
+ _D4core6memory2GC5queryFNbNkMxPvZSQBgQBe8BlkInfo_@Base 12
+ _D4core6memory2GC5statsFNbNiNfZSQBeQBcQy5Stats@Base 12
+ _D4core6memory2GC6__initZ@Base 12
+ _D4core6memory2GC6addrOfFNaNbNiNePNgvZQf@Base 12
+ _D4core6memory2GC6addrOfFNaNbNiNePvZQd@Base 12
+ _D4core6memory2GC6sizeOfFNaNbNiPvZm@Base 12
+ _D4core6memory2GC6sizeOfFNbNiMxPvZm@Base 12
+ _D4core6memory2GC7clrAttrFNaNbPvkZk@Base 12
+ _D4core6memory2GC7clrAttrFNbMxPvkZk@Base 12
+ _D4core6memory2GC7getAttrFNaNbPvZk@Base 12
+ _D4core6memory2GC7getAttrFNbMxPvZk@Base 12
+ _D4core6memory2GC7setAttrFNaNbPvkZk@Base 12
+ _D4core6memory2GC7setAttrFNbMxPvkZk@Base 12
+ _D4core6memory8BlkInfo_6__initZ@Base 12
+ _D4core6memory8pageSizeym@Base 12
+ _D4core6memory__T10pureMallocZQnFNaNbNiNemZPv@Base 12
+ _D4core6memory__T11pureReallocZQoFNaNbNiPvmZQe@Base 12
+ _D4core6memory__T8pureFreeZQkFNaNbNiPvZv@Base 12
+ _D4core6stdcpp11string_view11__moduleRefZ@Base 12
+ _D4core6stdcpp11string_view12__ModuleInfoZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTaZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTuZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T11char_traitsTwZQq6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTaTSQBzQBxQBt__T11char_traitsTaZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTuTSQBzQBxQBt__T11char_traitsTuZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11string_view__T17basic_string_viewTwTSQBzQBxQBt__T11char_traitsTwZQqZQCc6__initZ@Base 12
+ _D4core6stdcpp11type_traits11__moduleRefZ@Base 12
+ _D4core6stdcpp11type_traits12__ModuleInfoZ@Base 12
+ _D4core6stdcpp11type_traits__T17integral_constantTbVbi0ZQBa6__initZ@Base 12
+ _D4core6stdcpp11type_traits__T17integral_constantTbVbi1ZQBa6__initZ@Base 12
+ _D4core6stdcpp4new_11__moduleRefZ@Base 12
+ _D4core6stdcpp4new_11align_val_t6__initZ@Base 12
+ _D4core6stdcpp4new_11std_nothrowySQBgQBeQBa9nothrow_t@Base 12
+ _D4core6stdcpp4new_12__ModuleInfoZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc6__initZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc6__vtblZ@Base 12
+ _D4core6stdcpp4new_9bad_alloc7__ClassZ@Base 12
+ _D4core6stdcpp4new_9nothrow_t6__initZ@Base 12
+ _D4core6stdcpp5array11__moduleRefZ@Base 12
+ _D4core6stdcpp5array12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6memory11__moduleRefZ@Base 12
+ _D4core6stdcpp6memory12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6string11__moduleRefZ@Base 12
+ _D4core6stdcpp6string12__ModuleInfoZ@Base 12
+ _D4core6stdcpp6vector11__moduleRefZ@Base 12
+ _D4core6stdcpp6vector12__ModuleInfoZ@Base 12
+ _D4core6stdcpp7utility11__moduleRefZ@Base 12
+ _D4core6stdcpp7utility12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__ctorMFNbNiZCQBwQBuQBqQBk@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__initZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo10bad_typeid7__ClassZ@Base 12
+ _D4core6stdcpp8typeinfo11__moduleRefZ@Base 12
+ _D4core6stdcpp8typeinfo12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__ctorMFNbNiZCQBtQBrQBnQBh@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__initZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo8bad_cast7__ClassZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__ctorMFNiPxaZCQBvQBtQBpQBj@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__initZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info6__vtblZ@Base 12
+ _D4core6stdcpp8typeinfo9type_info7__ClassZ@Base 12
+ _D4core6stdcpp8xutility11__moduleRefZ@Base 12
+ _D4core6stdcpp8xutility12__ModuleInfoZ@Base 12
+ _D4core6stdcpp8xutility14CppStdRevision6__initZ@Base 12
+ _D4core6stdcpp9allocator11__moduleRefZ@Base 12
+ _D4core6stdcpp9allocator12__ModuleInfoZ@Base 12
+ _D4core6stdcpp9exception11__moduleRefZ@Base 12
+ _D4core6stdcpp9exception12__ModuleInfoZ@Base 12
+ _D4core6stdcpp9exception13bad_exception6__ctorMFNbNiPxaZCQCdQCbQBxQBq@Base 12
+ _D4core6stdcpp9exception13bad_exception6__initZ@Base 12
+ _D4core6stdcpp9exception13bad_exception6__vtblZ@Base 12
+ _D4core6stdcpp9exception13bad_exception7__ClassZ@Base 12
+ _D4core6stdcpp9exceptionQk6__ctorMFNbNiPxaiZCQBrQBpQBlQBo@Base 12
+ _D4core6stdcpp9exceptionQk6__ctorMFNbNiZCQBnQBlQBhQBk@Base 12
+ _D4core6stdcpp9exceptionQk6__initZ@Base 12
+ _D4core6stdcpp9exceptionQk6__vtblZ@Base 12
+ _D4core6stdcpp9exceptionQk7__ClassZ@Base 12
+ _D4core6thread10threadbase10ThreadBase10popContextMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase10topContextMFNbNiZPSQCfQCd7context12StackContext@Base 12
+ _D4core6thread10threadbase10ThreadBase11pushContextMFNbNiPSQCfQCd7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase12isMainThreadMFNbNdNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase13nAboutToStartm@Base 12
+ _D4core6thread10threadbase10ThreadBase13pAboutToStartPCQCbQBzQBvQBm@Base 12
+ _D4core6thread10threadbase10ThreadBase13tlsGCdataInitMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase15initDataStorageMFNbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase18criticalRegionLockFNbNdNiZCQCn4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase10ThreadBase18destroyDataStorageMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase18destructBeforeDtorMFNbNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase19_criticalRegionLockG72v@Base 12
+ _D4core6thread10threadbase10ThreadBase25destroyDataStorageIfAvailMFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase2idMFNdNiNfZm@Base 12
+ _D4core6thread10threadbase10ThreadBase3addFNbNiCQBuQBsQBoQBfbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase3addFNbNiPSQBvQBt7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase3runMFZv@Base 12
+ _D4core6thread10threadbase10ThreadBase4nameMFNdNiNfAyaZv@Base 12
+ _D4core6thread10threadbase10ThreadBase4nameMFNdNiNfZAya@Base 12
+ _D4core6thread10threadbase10ThreadBase5slockFNbNdNiZCQBz4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase10ThreadBase5yieldFNbNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfDFZvmZCQCiQCgQCcQBt@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfPFZvmZCQCiQCgQCcQBt@Base 12
+ _D4core6thread10threadbase10ThreadBase6__ctorMFNaNbNiNfmZCQCeQCcQByQBp@Base 12
+ _D4core6thread10threadbase10ThreadBase6__initZ@Base 12
+ _D4core6thread10threadbase10ThreadBase6__vtblZ@Base 12
+ _D4core6thread10threadbase10ThreadBase6_slockG72v@Base 12
+ _D4core6thread10threadbase10ThreadBase6getAllFZ6resizeFNaNbNfKACQCkQCiQCeQBvmZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6getAllFZACQBvQBtQBpQBg@Base 12
+ _D4core6thread10threadbase10ThreadBase6removeFNbNiCQBxQBvQBrQBiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase6removeFNbNiPSQByQBw7context12StackContextZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7__ClassZ@Base 12
+ _D4core6thread10threadbase10ThreadBase7getThisFNbNiNfZCQCbQBzQBvQBm@Base 12
+ _D4core6thread10threadbase10ThreadBase7opApplyFMDFKCQByQBwQBsQBjZiZ6resizeFNbNiKAQBemZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7opApplyFMDFKCQByQBwQBsQBjZiZi@Base 12
+ _D4core6thread10threadbase10ThreadBase7setThisFNbNiCQByQBwQBsQBjZv@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_cbegPSQBuQBs7context12StackContext@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_mainCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_tbegCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_thisCQBtQBrQBnQBe@Base 12
+ _D4core6thread10threadbase10ThreadBase7sm_tlenm@Base 12
+ _D4core6thread10threadbase10ThreadBase8isDaemonMFNdNiNfZb@Base 12
+ _D4core6thread10threadbase10ThreadBase8isDaemonMFNdNiNfbZv@Base 12
+ _D4core6thread10threadbase10ThreadBase9initLocksFNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase9isRunningMFNbNdNiZb@Base 12
+ _D4core6thread10threadbase10ThreadBase9termLocksFNiZv@Base 12
+ _D4core6thread10threadbase10ThreadBase__T10getAllImplS_DQCcQCaQBwQBn6getAllFZ6resizeFNaNbNfKACQDoQDmQDiQCzmZvZQCrFZQx@Base 12
+ _D4core6thread10threadbase10ThreadBase__T10getAllImplS_DQCcQCaQBwQBn7opApplyFMDFKCQDcQDaQCwQCnZiZ6resizeFNbNiKAQBemZvZQCzFNiZQp@Base 12
+ _D4core6thread10threadbase11ThreadError6__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDcQDaQCwQCn@Base 12
+ _D4core6thread10threadbase11ThreadError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDcQDaQCwQCn@Base 12
+ _D4core6thread10threadbase11ThreadError6__initZ@Base 12
+ _D4core6thread10threadbase11ThreadError6__vtblZ@Base 12
+ _D4core6thread10threadbase11ThreadError7__ClassZ@Base 12
+ _D4core6thread10threadbase11__moduleRefZ@Base 12
+ _D4core6thread10threadbase11ll_nThreadsm@Base 12
+ _D4core6thread10threadbase11ll_pThreadsPSQBnQBl5types13ll_ThreadData@Base 12
+ _D4core6thread10threadbase12__ModuleInfoZ@Base 12
+ _D4core6thread10threadbase12lowlevelLockFNbNdNiZCQBv4sync5mutex5Mutex@Base 12
+ _D4core6thread10threadbase12suspendDepthk@Base 12
+ _D4core6thread10threadbase13onThreadErrorFNbNiAyaZ5errorCQCdQCbQBx11ThreadError@Base 12
+ _D4core6thread10threadbase13onThreadErrorFNbNiAyaZv@Base 12
+ _D4core6thread10threadbase15ThreadException6__ctorMFNaNbNiNfAyaC6object9ThrowableQvmZCQDgQDeQDaQCr@Base 12
+ _D4core6thread10threadbase15ThreadException6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQDgQDeQDaQCr@Base 12
+ _D4core6thread10threadbase15ThreadException6__initZ@Base 12
+ _D4core6thread10threadbase15ThreadException6__vtblZ@Base 12
+ _D4core6thread10threadbase15ThreadException7__ClassZ@Base 12
+ _D4core6thread10threadbase15ll_removeThreadFNbNimZv@Base 12
+ _D4core6thread10threadbase15scanAllTypeImplFNbMDFNbEQByQBwQBs8ScanTypePvQcZvQgZv@Base 12
+ _D4core6thread10threadbase17multiThreadedFlagb@Base 12
+ _D4core6thread10threadbase17thread_findByAddrFmZCQBvQBtQBp10ThreadBase@Base 12
+ _D4core6thread10threadbase18findLowLevelThreadFNbNimZb@Base 12
+ _D4core6thread10threadbase19initLowlevelThreadsFNiZv@Base 12
+ _D4core6thread10threadbase19termLowlevelThreadsFNiZv@Base 12
+ _D4core6thread10threadbase25_sharedStaticDtor_L948_C1FZv@Base 12
+ _D4core6thread10threadbase7ll_lockG72v@Base 12
+ _D4core6thread10threadbase__T15thread_term_tplTCQBuQBs8osthread6ThreadTG177vZQBwFNiKQnZv@Base 12
+ _D4core6thread10threadbase__T21thread_attachThis_tplTCQCaQBy8osthread6ThreadZQBwFNbZQBf@Base 12
+ _D4core6thread11__moduleRefZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup3addMFCQBtQBr8osthread6ThreadZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6__initZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6__vtblZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6createMFDFZvZCQCbQBz8osthread6Thread@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6createMFPFZvZCQCbQBz8osthread6Thread@Base 12
+ _D4core6thread11threadgroup11ThreadGroup6removeMFCQBwQBu8osthread6ThreadZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7__ClassZ@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7joinAllMFbZv@Base 12
+ _D4core6thread11threadgroup11ThreadGroup7opApplyMFMDFKCQCbQBz8osthread6ThreadZiZi@Base 12
+ _D4core6thread11threadgroup11__moduleRefZ@Base 12
+ _D4core6thread11threadgroup12__ModuleInfoZ@Base 12
+ _D4core6thread12__ModuleInfoZ@Base 12
+ _D4core6thread5fiber11__moduleRefZ@Base 12
+ _D4core6thread5fiber12__ModuleInfoZ@Base 12
+ _D4core6thread5fiber5Fiber10allocStackMFNbmmZv@Base 12
+ _D4core6thread5fiber5Fiber13yieldAndThrowFNbNiC6object9ThrowableZv@Base 12
+ _D4core6thread5fiber5Fiber19_staticCtor_L924_C9FZv@Base 12
+ _D4core6thread5fiber5Fiber3runMFZv@Base 12
+ _D4core6thread5fiber5Fiber4callMFEQBgQBeQBaQx7RethrowZC6object9Throwable@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiDFZvZv@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiPFZvZv@Base 12
+ _D4core6thread5fiber5Fiber5resetMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber5stateMxFNaNbNdNiNfZEQBtQBrQBnQBk5State@Base 12
+ _D4core6thread5fiber5Fiber5yieldFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber6__ctorMFNbDFZvmmZCQBrQBpQBlQBi@Base 12
+ _D4core6thread5fiber5Fiber6__ctorMFNbPFZvmmZCQBrQBpQBlQBi@Base 12
+ _D4core6thread5fiber5Fiber6__dtorMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber6__initZ@Base 12
+ _D4core6thread5fiber5Fiber6__vtblZ@Base 12
+ _D4core6thread5fiber5Fiber7__ClassZ@Base 12
+ _D4core6thread5fiber5Fiber7getThisFNbNiNfZCQBpQBnQBjQBg@Base 12
+ _D4core6thread5fiber5Fiber7setThisFNbNiCQBmQBkQBgQBdZv@Base 12
+ _D4core6thread5fiber5Fiber7sm_thisCQBhQBfQBbQy@Base 12
+ _D4core6thread5fiber5Fiber7sm_utxtSQBh3sys5posix8ucontext10ucontext_t@Base 12
+ _D4core6thread5fiber5Fiber8callImplMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber8switchInMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9freeStackMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9initStackMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber9switchOutMFNbNiZv@Base 12
+ _D4core6thread5fiber5Fiber__T4callVEQBiQBgQBcQz7Rethrowi0ZQBdMFNbNiZC6object9Throwable@Base 12
+ _D4core6thread5fiber5Fiber__T4callVEQBiQBgQBcQz7Rethrowi1ZQBdMFNiZC6object9Throwable@Base 12
+ _D4core6thread5types11__moduleRefZ@Base 12
+ _D4core6thread5types12__ModuleInfoZ@Base 12
+ _D4core6thread5types13ll_ThreadData6__initZ@Base 12
+ _D4core6thread5types17PTHREAD_STACK_MINym@Base 12
+ _D4core6thread5types24_sharedStaticCtor_L54_C1FZv@Base 12
+ _D4core6thread5types8PAGESIZEym@Base 12
+ _D4core6thread7context11__moduleRefZ@Base 12
+ _D4core6thread7context12StackContext6__initZ@Base 12
+ _D4core6thread7context12__ModuleInfoZ@Base 12
+ _D4core6thread7context8Callable6__initZ@Base 12
+ _D4core6thread7context8Callable6opCallMFZv@Base 12
+ _D4core6thread7context8Callable8opAssignMFNaNbNiNfDFZvZv@Base 12
+ _D4core6thread7context8Callable8opAssignMFNaNbNiNfPFZvZv@Base 12
+ _D4core6thread8osthread11__moduleRefZ@Base 12
+ _D4core6thread8osthread11getStackTopFNbNiZPv@Base 12
+ _D4core6thread8osthread11swapContextFNbNiPvZQd@Base 12
+ _D4core6thread8osthread12__ModuleInfoZ@Base 12
+ _D4core6thread8osthread12attachThreadFNbNiCQBpQBn10threadbase10ThreadBaseZQBg@Base 12
+ _D4core6thread8osthread12suspendCountSQBk3sys5posix9semaphore5sem_t@Base 12
+ _D4core6thread8osthread12thread_yieldFNbNiZv@Base 12
+ _D4core6thread8osthread14getStackBottomFNbNiZPv@Base 12
+ _D4core6thread8osthread15adjustStackSizeFNbNimZm@Base 12
+ _D4core6thread8osthread16_mainThreadStoreG177v@Base 12
+ _D4core6thread8osthread17thread_entryPointUNbPvZ21thread_cleanupHandlerUNaNbNiQBhZv@Base 12
+ _D4core6thread8osthread18callWithStackShellFNbMDFNbPvZvZv@Base 12
+ _D4core6thread8osthread18joinLowLevelThreadFNbNimZv@Base 12
+ _D4core6thread8osthread18resumeSignalNumberi@Base 12
+ _D4core6thread8osthread19suspendSignalNumberi@Base 12
+ _D4core6thread8osthread20createLowLevelThreadFNbNiDFNbZvkQhZ20thread_lowlevelEntryUNbPvZQd@Base 12
+ _D4core6thread8osthread20createLowLevelThreadFNbNiDFNbZvkQhZm@Base 12
+ _D4core6thread8osthread6Thread12PRIORITY_MAXFNaNbNdNiNeZxi@Base 12
+ _D4core6thread8osthread6Thread12PRIORITY_MINFNaNbNdNiNeZi@Base 12
+ _D4core6thread8osthread6Thread14loadPrioritiesFNbNiNeZSQCbQBzQBvQBp8Priority@Base 12
+ _D4core6thread8osthread6Thread16PRIORITY_DEFAULTFNaNbNdNiNeZi@Base 12
+ _D4core6thread8osthread6Thread3runMFZv@Base 12
+ _D4core6thread8osthread6Thread4joinMFbZC6object9Throwable@Base 12
+ _D4core6thread8osthread6Thread5cacheOSQBkQBiQBeQy8Priority@Base 12
+ _D4core6thread8osthread6Thread5sleepFNbNiSQBo4time8DurationZv@Base 12
+ _D4core6thread8osthread6Thread5startMFNbZCQBoQBmQBiQBc@Base 12
+ _D4core6thread8osthread6Thread5yieldFNbNiZv@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfDFZvmZCQCaQByQBuQBo@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfPFZvmZCQCaQByQBuQBo@Base 12
+ _D4core6thread8osthread6Thread6__ctorMFNaNbNiNfmZCQBwQBuQBqQBk@Base 12
+ _D4core6thread8osthread6Thread6__dtorMFNbNiZv@Base 12
+ _D4core6thread8osthread6Thread6__initZ@Base 12
+ _D4core6thread8osthread6Thread6__vtblZ@Base 12
+ _D4core6thread8osthread6Thread7__ClassZ@Base 12
+ _D4core6thread8osthread6Thread7getThisFNbNiNfZCQBtQBrQBnQBh@Base 12
+ _D4core6thread8osthread6Thread8Priority6__initZ@Base 12
+ _D4core6thread8osthread6Thread8priorityMFNdZi@Base 12
+ _D4core6thread8osthread6Thread8priorityMFNdiZv@Base 12
+ _D4core6thread8osthread6Thread9isRunningMFNbNdNiZb@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa12_5052494f524954595f4d4158ZQBtFNbNiNfZi@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa12_5052494f524954595f4d494eZQBtFNbNiNfZi@Base 12
+ _D4core6thread8osthread6Thread__T10loadGlobalVAyaa16_5052494f524954595f44454641554c54ZQCbFNbNiNfZi@Base 12
+ _D4core6thread8osthread6resumeFNbNiCQBiQBg10threadbase10ThreadBaseZv@Base 12
+ _D4core6thread8osthread7suspendFNbNiCQBjQBhQBd6ThreadZb@Base 12
+ _D4core6thread8osthread8toThreadFNaNbNiNeNkMCQBrQBp10threadbase10ThreadBaseZCQCxQCvQCr6Thread@Base 12
+ _D4core6vararg11__moduleRefZ@Base 12
+ _D4core6vararg12__ModuleInfoZ@Base 12
+ _D4core7runtime11__moduleRefZ@Base 12
+ _D4core7runtime12__ModuleInfoZ@Base 12
+ _D4core7runtime14UnitTestResult6__initZ@Base 12
+ _D4core7runtime18runModuleUnitTestsUZ19unittestSegvHandlerUiPSQCi3sys5posix6signal9siginfo_tPvZv@Base 12
+ _D4core7runtime19defaultTraceHandlerFPvZC6object9Throwable9TraceInfo@Base 12
+ _D4core7runtime25_sharedStaticCtor_L119_C1FZv@Base 12
+ _D4core7runtime5CArgs6__initZ@Base 12
+ _D4core7runtime7Runtime10initializeFZb@Base 12
+ _D4core7runtime7Runtime16moduleUnitTesterFNdPFZbZv@Base 12
+ _D4core7runtime7Runtime16moduleUnitTesterFNdZPFZb@Base 12
+ _D4core7runtime7Runtime19sm_moduleUnitTesterPFZb@Base 12
+ _D4core7runtime7Runtime22sm_extModuleUnitTesterPFZSQBxQBv14UnitTestResult@Base 12
+ _D4core7runtime7Runtime24extendedModuleUnitTesterFNdPFZSQCcQCa14UnitTestResultZv@Base 12
+ _D4core7runtime7Runtime24extendedModuleUnitTesterFNdZPFZSQCdQCb14UnitTestResult@Base 12
+ _D4core7runtime7Runtime6__initZ@Base 12
+ _D4core7runtime7Runtime9terminateFZb@Base 12
+ _D4core8builtins11__ctfeWriteFNaNbNiNfMAxaZv@Base 12
+ _D4core8builtins11__moduleRefZ@Base 12
+ _D4core8builtins12__ModuleInfoZ@Base 12
+ _D4core8demangle11__moduleRefZ@Base 12
+ _D4core8demangle12__ModuleInfoZ@Base 12
+ _D4core8demangle12demangleTypeFNaNbNfAxaAaZQd@Base 12
+ _D4core8demangle15decodeDmdStringFNaNbNfAxaKmZAya@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks10parseLNameMFNaNlNfMKSQDeQDc__T8DemangleTSQDyQDwQDqFNaNbNfNkMQDcZQDcZQBmZb@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11__xopEqualsMxFKxSQDaQCyQCsFNaNbNfNkMQCeZQCeZb@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks13encodeBackrefMFNaNbNlNfmZv@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks13flushPositionMFNaNbNlNfKSQDiQDg__T8DemangleTSQEcQEaQDuFNaNbNfNkMQDgZQDgZQBmZv@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks16positionInResultMFNaNbNiNlNfmZm@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks9__xtoHashFNbNeKxSQCzQCxQCrFNaNbNfNkMQCdZQCdZm@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks9parseTypeMFNaNjNfKSQDbQCz__T8DemangleTSQDvQDtQDnFNaNbNfNkMQCzZQCzZQBmAaZQd@Base 12
+ _D4core8demangle15reencodeMangledFNaNbNfNkMAxaZAa@Base 12
+ _D4core8demangle7NoHooks6__initZ@Base 12
+ _D4core8demangleQjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFMDFyPS6object10ModuleInfoZiZiTQBfZQByFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbMDFNbPvZvZvTQpZQBhFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiAyaMDFNbNiQkZQnbZQrTQzZQBrFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiAyakQeQgmZvTQrZQBjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiCQBm6thread10threadbase10ThreadBaseZQBkTQBtZQCmFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiCQBm6thread10threadbase10ThreadBaseZvTQBrZQCkFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiPvZQdTQlZQBdFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiPvZvTQkZQBcFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZPvTQjZQBbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZmTQiZQBaFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbNiZvTQiZQBaFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbPvMDFNbQhQjZvZvTQtZQBlFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T10mangleFuncHTPFNbPvMDFNbQhZiZvTQrZQBjFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEgQEe__TQDyTQDuZQEgFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQEfQEd__TQDxTQDtZQEfFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFMDFyPS6object10ModuleInfoZiZiZQBnFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDpQDn__TQDhTQDdZQDpFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDoQDm__TQDgTQDcZQDoFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEaQDy__TQDsTQDoZQEaFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDzQDx__TQDrTQDnZQDzFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDrQDp__TQDjTQDfZQDrFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDqQDo__TQDiTQDeZQDqFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEuQEs__TQEmTQEiZQEuFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQEtQEr__TQElTQEhZQEtFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZQBkZQCbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQEsQEq__TQEkTQEgZQEsFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQErQEp__TQEjTQEfZQErFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiCQBf6thread10threadbase10ThreadBaseZvZQBzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDlQDj__TQDdTQCzZQDlFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDkQDi__TQDcTQCyZQDkFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDkQDi__TQDcTQCyZQDkFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDjQDh__TQDbTQCxZQDjFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDjQDh__TQDbTQCxZQDjFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDhQDf__TQCzTQCvZQDhFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDiQDg__TQDaTQCwZQDiFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDhQDf__TQCzTQCvZQDhFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDuQDs__TQDmTQDiZQDuFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDtQDr__TQDlTQDhZQDtFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter10indexOfDotMxFNaNbNiNlNfZl@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter11__xopEqualsMxFKxSQDrQDp__TQDjTQDfZQDrFNaNbNfNkMQCtNkMQCtZQCuZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter5frontMxFNaNbNdNiNjNfZQBs@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter8popFrontMFNaNbNiNlNfZv@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter9__xtoHashFNbNeKxSQDqQDo__TQDiTQDeZQDqFNaNbNfNkMQCsNkMQCsZQCtZm@Base 12
+ _D4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10isHexDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10parseLNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl10parseValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11__xopEqualsMxFKxSQDyQDw__TQDqTQDkZQDyZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11peekBackrefMFNaNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl11sliceNumberMFNaNjNfZQBs@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12decodeNumberMFNaNlNfMQBtZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12decodeNumberMFNaNlNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12demangleNameMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl12demangleTypeMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl13parseFuncAttrMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl13parseModifierMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__ctorMFNaNbNiNfAyaZCQErQEp__TQEjTQEdZQErQCg@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl14ParseException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl15parseSymbolNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16isCallConventionFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16parseMangledNameMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl16parseMangledNameMFNaNlNfbmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__ctorMFNaNbNiNfAyaZCQEuQEs__TQEmTQEgZQEuQCj@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17OverflowException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17isSymbolNameFrontMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseIntegerValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseTemplateArgsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl17parseTypeFunctionMFNaNjNfAaEQEjQEh__TQEbTQDvZQEj10IsDelegateZQBk@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl18parseFuncArgumentsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl18parseQualifiedNameMFNaNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19mayBeMangledNameArgMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19parseCallConventionMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl19parseMangledNameArgMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25mayBeTemplateInstanceNameMFNaNlNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25parseFunctionTypeNoReturnMFNaNjNfbZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl25parseTemplateInstanceNameMFNaNlNfbZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3eatMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3padMFNaNfQBgZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3putMFNaNjNfMQBjZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl3putMFNaNjNfaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl4peekMFNaNbNiNfmZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl4testMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5emptyMFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5errorFNaNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5frontMFNaNbNdNiNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5matchMFNaNfQBiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5matchMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl5shiftMFNaNiNfQBkZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__ctorMFNaNbNcNiNfNkMQBsEQEfQEd__TQDxTQDrZQEf7AddTypeNkMAaZSQFoQFm__TQFgTQFaZQFo@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__ctorMFNaNbNcNiNfNkMQBsNkMAaZSQElQEj__TQEdTQDxZQEl@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6appendMFNaNjNfQBlZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6removeMFNaNbNiNfQBnZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6silentMFNaNfDFNaNfZvZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7isAlphaFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl7isDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8containsFNaNbNiNeQBoQBrZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8overflowFNaNiNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8popFrontMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8popFrontMFNaNfiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8putAsHexMFNaNfmiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl8putCommaMFNaNfmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9__xtoHashFNbNeKxSQDxQDv__TQDpTQDjZQDxZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9ascii2hexFNaNfaZh@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9copyInputMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseRealMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseTypeMFNaNjNfAaZ10primitivesyG23Aa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl9parseTypeMFNaNjNfAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T10doDemangleSQDvQDt__TQDnTQDhZQDv16parseMangledNameZQCaMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T10doDemangleS_DQDxQDv__TQDpTQDjZQDx9parseTypeMFNaNjNfAaZQdZQChMFNaNbNjNfZQu@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T13decodeBackrefVii0ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl__T13decodeBackrefVmi1ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10isHexDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10parseLNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10parseValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11__xopEqualsMxFKxSQCnQCl__TQCfTQBzZQCnZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11peekBackrefMFNaNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa11sliceNumberMFNaNjNfZAxa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12decodeNumberMFNaNlNfMAxaZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12decodeNumberMFNaNlNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12demangleNameMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa12demangleTypeMFNaNbNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa13parseFuncAttrMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa13parseModifierMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__ctorMFNaNbNiNfAyaZCQDgQDe__TQCyTQCsZQDgQCg@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa14ParseException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa15parseSymbolNameMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16isCallConventionFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16parseMangledNameMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa16parseMangledNameMFNaNlNfbmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__ctorMFNaNbNiNfAyaZCQDjQDh__TQDbTQCvZQDjQCj@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException6__vtblZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17OverflowException7__ClassZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17isSymbolNameFrontMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseIntegerValueMFNaNlNfMAaaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseTemplateArgsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa17parseTypeFunctionMFNaNjNfAaEQCyQCw__TQCqTQCkZQCy10IsDelegateZQBk@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa18parseFuncArgumentsMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa18parseQualifiedNameMFNaNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19mayBeMangledNameArgMFNaNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19parseCallConventionMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa19parseMangledNameArgMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25mayBeTemplateInstanceNameMFNaNlNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25parseFunctionTypeNoReturnMFNaNjNfbZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa25parseTemplateInstanceNameMFNaNlNfbZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3eatMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3padMFNaNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3putMFNaNjNfMAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa3putMFNaNjNfaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa4peekMFNaNbNiNfmZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa4testMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5emptyMFNaNbNdNiNfZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5errorFNaNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5frontMFNaNbNdNiNfZa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5matchMFNaNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5matchMFNaNfaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa5shiftMFNaNiNfAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__ctorMFNaNbNcNiNfNkMAxaEQCuQCs__TQCmTQCgZQCu7AddTypeNkMAaZSQEdQEb__TQDvTQDpZQEd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__ctorMFNaNbNcNiNfNkMAxaNkMAaZSQDaQCy__TQCsTQCmZQDa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__initZ@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6appendMFNaNjNfAxaZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6removeMFNaNbNiNfAxaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6silentMFNaNfDFNaNfZvZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7isAlphaFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7isDigitFNaNbNiNfaZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8containsFNaNbNiNeAxaQdZb@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8overflowFNaNiNeAyaZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8popFrontMFNaNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8popFrontMFNaNfiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8putAsHexMFNaNfmiZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa8putCommaMFNaNfmZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9__xtoHashFNbNeKxSQCmQCk__TQCeTQByZQCmZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9ascii2hexFNaNfaZh@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9copyInputMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseRealMFNaNlNfZv@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseTypeMFNaNjNfAaZ10primitivesyG23Aa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa9parseTypeMFNaNjNfAaZQd@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T10doDemangleSQCkQCi__TQCcTQBwZQCk16parseMangledNameZQCaMFNaNbNjNfZAa@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T10doDemangleS_DQCmQCk__TQCeTQByZQCm9parseTypeMFNaNjNfAaZQdZQChMFNaNbNjNfZQu@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T13decodeBackrefVii0ZQuMFNaNfZm@Base 12
+ _D4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa__T13decodeBackrefVmi1ZQuMFNaNfZm@Base 12
+ _D4core8internal10attributes11__moduleRefZ@Base 12
+ _D4core8internal10attributes12__ModuleInfoZ@Base 12
+ _D4core8internal10entrypoint11__moduleRefZ@Base 12
+ _D4core8internal10entrypoint12__ModuleInfoZ@Base 12
+ _D4core8internal11destruction11__moduleRefZ@Base 12
+ _D4core8internal11destruction12__ModuleInfoZ@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3gcc8sections3elf9ThreadDSOZQBvFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std11concurrency7MessageZQBtFNfKQBiZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqZQKaFNaNbNiKQJtZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std3net4curl3FTP4ImplZQBqFKQBdZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std3net4curl4HTTP4ImplZQBrFKQBeZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std3net4curl4SMTP4ImplZQBrFKQBeZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std4file15DirIteratorImplZQBuFNfKQBjZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTS3std5stdio4FileZQBjFNfKQyZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBx2gc11gcinterface4RootZQBsFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBx2gc11gcinterface5RangeZQBtFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDlFNaNbNiNfKQDgZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDnFNaNbNiNfKQDiZv@Base 12
+ _D4core8internal11destruction__T15destructRecurseTSQBxQBv9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDlFNaNbNiNfKQDgZv@Base 12
+ _D4core8internal12parseoptions10parseErrorFNbNiMxAaMxQeMxQiAxaZb@Base 12
+ _D4core8internal12parseoptions11__moduleRefZ@Base 12
+ _D4core8internal12parseoptions12__ModuleInfoZ@Base 12
+ _D4core8internal12parseoptions15overflowedErrorFNbNiMxAaMxQeZb@Base 12
+ _D4core8internal12parseoptions3minFNbNimmZm@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKQfQlZb@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKbQkZb@Base 12
+ _D4core8internal12parseoptions5parseFNbNiAxaKANgaKfQkZb@Base 12
+ _D4core8internal12parseoptions6MemVal6__initZ@Base 12
+ _D4core8internal12parseoptions8optErrorFNbNiMxAaMxQeAxaZb@Base 12
+ _D4core8internal12parseoptions__T12parseOptionsTSQBv2gc6config6ConfigZQBlFNbNiKQBfAyaZb@Base 12
+ _D4core8internal12parseoptions__T14rt_parseOptionTbZQtFNbNiAxaKANgaKbQkZb@Base 12
+ _D4core8internal12parseoptions__T17initConfigOptionsTSQCa2gc6config6ConfigZQBqFNbNiKQBfAyaZb@Base 12
+ _D4core8internal12parseoptions__T4skipX7isspaceZQpFNaNbNiNfANgaZQf@Base 12
+ _D4core8internal12parseoptions__T5parseHThZQkFNbNiAxaKANgaKhQkbZb@Base 12
+ _D4core8internal12parseoptions__T5parseHTkZQkFNbNiAxaKANgaKkQkbZb@Base 12
+ _D4core8internal12parseoptions__T5parseHTmZQkFNbNiAxaKANgaKmQkbZb@Base 12
+ _D4core8internal2gc2os10isLowOnMemFNbNimZb@Base 12
+ _D4core8internal2gc2os10os_mem_mapFNbNimbZPv@Base 12
+ _D4core8internal2gc2os11__moduleRefZ@Base 12
+ _D4core8internal2gc2os12__ModuleInfoZ@Base 12
+ _D4core8internal2gc2os12os_mem_unmapFNbNiPvmZi@Base 12
+ _D4core8internal2gc2os15os_physical_memFNbNiZm@Base 12
+ _D4core8internal2gc2os8wait_pidFNbNiibZEQBmQBkQBeQBe11ChildStatus@Base 12
+ _D4core8internal2gc4bits11__moduleRefZ@Base 12
+ _D4core8internal2gc4bits12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4bits6GCBits10clearWordsMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits10copyRangeZMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits16copyWordsShiftedMFNbNimmmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits18copyRangeRepeatingMFNbNimmPxmmZv@Base 12
+ _D4core8internal2gc4bits6GCBits3setMFNaNbNiNlNemZi@Base 12
+ _D4core8internal2gc4bits6GCBits4DtorMFNbNibZv@Base 12
+ _D4core8internal2gc4bits6GCBits4copyMFNbNiPSQBqQBoQBiQBiQBgZv@Base 12
+ _D4core8internal2gc4bits6GCBits4testMxFNaNbNiNlNemZm@Base 12
+ _D4core8internal2gc4bits6GCBits4zeroMFNbNiZv@Base 12
+ _D4core8internal2gc4bits6GCBits5allocMFNbNimbZv@Base 12
+ _D4core8internal2gc4bits6GCBits5clearMFNaNbNiNlNemZi@Base 12
+ _D4core8internal2gc4bits6GCBits6__initZ@Base 12
+ _D4core8internal2gc4bits6GCBits6nwordsMxFNaNbNdNiZm@Base 12
+ _D4core8internal2gc4bits6GCBits6setAllMFNbNiZv@Base 12
+ _D4core8internal2gc4bits6GCBits8clrRangeMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits8setRangeMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits8setWordsMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9clrRangeZMFNbNimmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9copyRangeMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9copyWordsMFNbNimmPxmZv@Base 12
+ _D4core8internal2gc4bits6GCBits9setLockedMFNaNbNiNlNemZm@Base 12
+ _D4core8internal2gc4bits6GCBits9setRangeZMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw10baseOffsetFNbNimEQCfQCdQBxQBxQBvQCg4BinsZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw10extendTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw10initializeFZCQCbQBq11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl12conservativeQw10mallocTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw10numExtendsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw10numMallocsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw11calcBinBaseFZG15G256s@Base 12
+ _D4core8internal2gc4impl12conservativeQw11numReallocsl@Base 12
+ _D4core8internal2gc4impl12conservativeQw11reallocTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10initializeFNbPSQCrQCpQCjQCjQChQCs3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10log_mallocFNbPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector10log_parentFNbPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector11log_collectFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw12LeakDetector8log_freeFNbNiPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw12maxPauseTimeSQCb4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw12sentinel_addFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw12sentinel_subFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw13maxPoolMemorym@Base 12
+ _D4core8internal2gc4impl12conservativeQw13sentinel_initFNbNiPvmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw13sentinel_sizeFNbNixPvmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC10freeNoSyncMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11checkNoSyncMFNbPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11fullCollectMFNbZ2goFNbPSQDcQDaQCuQCuQCsQDd3GcxZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11fullCollectMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11queryNoSyncMFNbPvZSQCx6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12_inFinalizerb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12addrOfNoSyncMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12extendNoSyncMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12mallocNoSyncMFNbmkKmxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12profileStatsMFNbNiNeZSQDa6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC12sizeOfNoSyncMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13reallocNoSyncMFNbPvmKkKmxC8TypeInfoZQt@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13reserveNoSyncMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZ2goFNbPSQDiQDgQDaQDaQCyQDj3GcxMxQBjZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC14getStatsNoSyncMFNbNiNeJSQDc6memory2GC5StatsZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC18fullCollectNoStackMFNbZ2goFNbPSQDjQDhQDbQDbQCzQDk3GcxZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC18fullCollectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4filePa@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC4linem@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5checkMFNbPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5queryMFNbPvZSQCq6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC5statsMFNbNiNfZSQCs6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__ctorMFZCQCnQClQCfQCfQCdQCoQBt@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__dtorMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZ2goFNaNbNiNfPSQDaQCyQCsQCsQCqQDb3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6gcLockOSQClQCj8spinlock15AlignedSpinLock@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6lockNRFNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkMxC8TypeInfoZSQDd6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZ2goFNbPSQDaQCyQCsQCsQCqQDb3GcxQBikZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZ2goFNaNbNiNfPSQDbQCzQCtQCtQCrQDc3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZ2goFNbPSQCzQCxQCrQCrQCpQDa3GcxQBhZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZ2goFNbPSQDaQCyQCsQCsQCqQDb3GcxQBikZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZ2goFNbPSQCyQCwQCqQCqQCoQCz3GcxZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC8rootIterMFNdNiZDFMDFNbKSQDbQCq11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC9isPreciseb@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC9rangeIterMFNdNiZDFMDFNbKSQDcQCr11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy10freeNoSyncMFNbNiPvZvS_DQEmQEkQEeQEeQEcQEn8freeTimelS_DQFrQFpQFjQFjQFhQFs8numFreeslTQCpZQEtMFNbNiKQDdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11checkNoSyncMFNbPvZvS_DQElQEjQEdQEdQEbQEm9otherTimelS_DQFrQFpQFjQFjQFhQFs9numOtherslTQCrZQEuMFNbKQDdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11fullCollectMFNbZ2goFNbPSQEnQElQEfQEfQEdQEo3GcxZmTQBbZQDlMFNbKQBnZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy11queryNoSyncMFNbPvZSQEi6memory8BlkInfo_S_DQFeQFcQEwQEwQEuQFf9otherTimelS_DQGkQGiQGcQGcQGaQGl9numOtherslTQDkZQFnMFNbKQDwZQDx@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12addrOfNoSyncMFNbNiPvZQdS_DQEpQEnQEhQEhQEfQEq9otherTimelS_DQFvQFtQFnQFnQFlQFw9numOtherslTQCsZQEyMFNbNiKQDgZQDk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12extendNoSyncMFNbPvmmxC8TypeInfoZmS_DQEzQExQErQErQEpQFa10extendTimelS_DQGhQGfQFzQFzQFxQGi10numExtendslTQDiTmTmTxQDmZQFvMFNbKQEdKmKmKxQEhZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12mallocNoSyncMFNbmkKmxC8TypeInfoZPvS_DQFaQEyQEsQEsQEqQFb10mallocTimelS_DQGiQGgQGaQGaQFyQGj10numMallocslTmTkTmTxQDlZQFuMFNbKmKkKmKxQEeZQDx@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy12sizeOfNoSyncMFNbNiPvZmS_DQEoQEmQEgQEgQEeQEp9otherTimelS_DQFuQFsQFmQFmQFkQFv9numOtherslTQCrZQExMFNbNiKQDfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13reallocNoSyncMFNbPvmKkKmxC8TypeInfoZQtS_DQFeQFcQEwQEwQEuQFf10mallocTimelS_DQGmQGkQGeQGeQGcQGn10numMallocslTQDmTmTkTmTxQDpZQGcMFNbKQEjKmKkKmKxQEmZQEy@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13reserveNoSyncMFNbmZmS_DQEmQEkQEeQEeQEcQEn9otherTimelS_DQFsQFqQFkQFkQFiQFt9numOtherslTmZQEtMFNbKmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy13runFinalizersMFNbMxAvZ2goFNbPSQEtQErQElQElQEjQEu3GcxMxQBjZvS_DQFzQFxQFrQFrQFpQGa9otherTimelS_DQHfQHdQGxQGxQGvQHg9numOtherslTQDsTxQEgZQGnMFNbKQEjKxQExZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy14getStatsNoSyncMFNbNiNeJSQEn6memory2GC5StatsZvS_DQFlQFjQFdQFdQFbQFm9otherTimelS_DQGrQGpQGjQGjQGhQGs9numOtherslTQDjZQFuMFNbNiNfKQDzZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy18fullCollectNoStackMFNbZ2goFNbPSQEuQEsQEmQEmQEkQEv3GcxZmTQBbZQDsMFNbKQBnZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy6enableMFZ2goFNaNbNiNfPSQElQEjQEdQEdQEbQEm3GcxZvS_DQFmQFkQFeQFeQFcQFn9otherTimelS_DQGsQGqQGkQGkQGiQGt9numOtherslTQDnZQFvMFNbNiNfKQEdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7clrAttrMFNbPvkZ2goFNbPSQElQEjQEdQEdQEbQEm3GcxQBikZkS_DQFqQFoQFiQFiQFgQFr9otherTimelS_DQGwQGuQGoQGoQGmQGx9numOtherslTQDrTQEfTkZQGfMFNbKQEjKQExKkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7disableMFZ2goFNaNbNiNfPSQEmQEkQEeQEeQEcQEn3GcxZvS_DQFnQFlQFfQFfQFdQFo9otherTimelS_DQGtQGrQGlQGlQGjQGu9numOtherslTQDnZQFwMFNbNiNfKQEdZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7getAttrMFNbPvZ2goFNbPSQEkQEiQEcQEcQEaQEl3GcxQBhZkS_DQFoQFmQFgQFgQFeQFp9otherTimelS_DQGuQGsQGmQGmQGkQGv9numOtherslTQDqTQEdZQGbMFNbKQEgKQEtZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy7setAttrMFNbPvkZ2goFNbPSQElQEjQEdQEdQEbQEm3GcxQBikZkS_DQFqQFoQFiQFiQFgQFr9otherTimelS_DQGwQGuQGoQGoQGmQGx9numOtherslTQDrTQEfTkZQGfMFNbKQEjKQExKkZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCsQCqQCkQCkQCiQCtQBy8minimizeMFNbZ2goFNbPSQEjQEhQEbQEbQDzQEk3GcxZvS_DQFkQFiQFcQFcQFaQFl9otherTimelS_DQGqQGoQGiQGiQGgQGr9numOtherslTQDnZQFtMFNbKQDzZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw14SENTINEL_EXTRAxk@Base 12
+ _D4core8internal2gc4impl12conservativeQw14baseOffsetBitsyG14G4m@Base 12
+ _D4core8internal2gc4impl12conservativeQw14bytesAllocatedm@Base 12
+ _D4core8internal2gc4impl12conservativeQw14numCollectionsm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool10allocPagesMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool18setFreePageOffsetsMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool7getInfoMFNbPvZSQCt6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool7getSizeMxFNbNimZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool8getPagesMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool9freePagesMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool__T20mergeFreePageOffsetsVbi0Vbi1ZQBfMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15LargeObjectPool__T20mergeFreePageOffsetsVbi1Vbi1ZQBfMFNbNimmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool7getInfoMFNbPvZSQCt6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool7getSizeMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw15SmallObjectPool9allocPageMFNbEQCsQCqQCkQCkQCiQCt4BinsZPSQDsQDqQDkQDkQDiQDt4List@Base 12
+ _D4core8internal2gc4impl12conservativeQw17maxCollectionTimeSQCg4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw18initialize_preciseFZCQCjQBy11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl12conservativeQw18sentinel_InvariantFNbNixPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10initializeMFZ23atforkHandlersInstalledb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10initializeMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10rootsApplyMFNbMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx10smallAllocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11__xopEqualsMxFKxSQCjQChQCbQCbQBzQCkQBpZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11collectForkMFNbbZEQCkQCiQCc2os11ChildStatus@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11disableForkMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11fullcollectMFNbbbbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11rangesApplyMFNbMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11recoverPageMFNbPSQCjQChQCbQCbQBzQCk15SmallObjectPoolmEQDuQDsQDmQDmQDkQDv4BinsZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx12collectRootsMFNbNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx12markParallelMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx14scanBackgroundMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15collectAllRootsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15fork_needs_lockb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15recoverNextPageMFNbEQCmQCkQCeQCeQCcQCn4BinsZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx15stopScanThreadsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx16startScanThreadsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx17collectInProgressMxFNbNdZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx17pullFromScanStackMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx18maxParallelThreadsMFNbZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx18setNextRecoverPoolMFNbEQCpQCnQChQChQCfQCq4BinsmZPSQDqQDoQDiQDiQDgQDr15SmallObjectPool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx19_d_gcx_atfork_childUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx20_d_gcx_atfork_parentUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx21_d_gcx_atfork_prepareUZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZ11smoothDecayFNaNbNiNfffZf@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZ3maxFNaNbNiNfffZf@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx4DtorMFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx5allocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx5sweepMFNbZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx6lowMemMxFNbNdZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7getInfoMFNbPvZSQCg6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7newPoolMFNbmbZPSQChQCfQBzQBzQBxQCi4Pool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7prepareMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8addRangeMFNbNiPvQcxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8bigAllocMFNbmKmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8binTableyG2049EQCgQCeQByQByQBwQCh4Bins@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8ctfeBinsFNbZG2049EQCjQChQCbQCbQBzQCk4Bins@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findBaseMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findPoolMFNaNbNiPvZPSQCmQCkQCeQCeQCcQCn4Pool@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8findSizeMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8instancePSQCbQBzQBtQBtQBrQCcQBh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8isMarkedMFNbNlPvZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8markForkMFNbbbbZ13wrap_delegateUPvZi@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8markForkMFNbbbbZEQCiQCgQCa2os11ChildStatus@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9InvariantMxFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9__xtoHashFNbNeKxSQCiQCgQCaQCaQByQCjQBoZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx9allocPageMFNbEQCfQCdQBxQBxQBvQCg4BinsZPSQDfQDdQCxQCxQCvQDg4List@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr3popMFNaNbNiZQs@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr4pushMFNbNiQqZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr7opIndexMNgFNaNbNcNimZNgPv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr8opAssignMFNaNbNcNiNjNeSQDkQDiQDcQDcQDaQDlQCq__TQCpTQCfZQCxZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr9popLockedMFNbNiKQwZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf3popMFNaNbNiZQCh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf4pushMFNbNiQCfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf7opIndexMNgFNaNbNcNimZNgSQFaQEyQEsQEsQEqQFbQEg__TQCsVbi0ZQDa@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf8opAssignMFNaNbNcNiNjNeSQEzQExQErQErQEpQFaQEf__TQEeTQDuZQEmZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi0ZQpZQCf9popLockedMFNbNiKQClZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf3popMFNaNbNiZQCh@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf4growMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf4pushMFNbNiQCfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5clearMFNaNbNiNfZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf5resetMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf7opIndexMNgFNaNbNcNimZNgSQFaQEyQEsQEsQEqQFbQEg__TQCsVbi1ZQDa@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf8opAssignMFNaNbNcNiNjNeSQEzQExQErQErQEpQFaQEf__TQEeTQDuZQEmZQBl@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTSQCiQCgQCaQCaQByQCjQBo__T9ScanRangeVbi1ZQpZQCf9popLockedMFNbNiKQClZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11markPreciseVbi0ZQsMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T11markPreciseVbi1ZQsMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T16markConservativeVbi0ZQxMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T16markConservativeVbi1ZQxMFNbNiNlPvQcZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T21pullFromScanStackImplVbi0ZQBcMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T21pullFromScanStackImplVbi1ZQBcMFNbNiZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi0Vbi0ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi0Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi0Vbi1Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi0ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi0Vbi0ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi0Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T4markVbi1Vbi1Vbi1ZQsMFNbNiNlSQCwQCuQCoQCoQCmQCxQCc__T9ScanRangeVbi1ZQpZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T11markPreciseVbi0ZQsMFNbNiNlPvQcZvZQCsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T11markPreciseVbi1ZQsMFNbNiNlPvQcZvZQCsMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T16markConservativeVbi0ZQxMFNbNiNlPvQcZvZQCxMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T7markAllS_DQCeQCcQBwQBwQBuQCfQBk__T16markConservativeVbi1ZQxMFNbNiNlPvQcZvZQCxMFNbbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi0ZQp6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi1ZQp6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw3setFNaNbNiKG4mmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool10initializeMFNbmbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool12freePageBitsMFNbmKxG4mZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool15freeAllPageBitsMFNbmZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool16setPointerBitmapMFNbPvmmxC8TypeInfokZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool21setPointerBitmapSmallMFNbPvmmkxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool4DtorMFNbZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool6isFreeMxFNaNbNdNiNlNfZb@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7ShiftBy6__initZ@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7clrBitsMFNbNimkZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7getBitsMFNbmZk@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool7setBitsMFNbmkZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool8findBaseMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool8numPagesFNbNimZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9InvariantMxFZv@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9pagenumOfMxFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9slGetInfoMFNbPvZSQCj6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl12conservativeQw4Pool9slGetSizeMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl12conservativeQw7binbaseyG15G256s@Base 12
+ _D4core8internal2gc4impl12conservativeQw7binsizeyG15s@Base 12
+ _D4core8internal2gc4impl12conservativeQw8freeTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw8lockTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw8markTimeSQBw4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw8numFreesl@Base 12
+ _D4core8internal2gc4impl12conservativeQw8prepTimeSQBw4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw9numOthersl@Base 12
+ _D4core8internal2gc4impl12conservativeQw9otherTimel@Base 12
+ _D4core8internal2gc4impl12conservativeQw9pauseTimeSQBx4time8Duration@Base 12
+ _D4core8internal2gc4impl12conservativeQw9sweepTimeSQBx4time8Duration@Base 12
+ _D4core8internal2gc4impl5protoQo11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl5protoQo12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC10rootsApplyMFMDFNbKSQChQBw11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11rangesApplyMFMDFNbKSQCiQBx11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC12profileStatsMFNbNiNfZSQCk6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC22transferRangesAndRootsMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC4DtorMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC5queryMFNbPvZSQCa6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC5statsMFNbNiNfZSQCc6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__ctorMFZCQBxQBvQBpQBpQBnQByQBl@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__initZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6callocMFNbmkMxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6extendMFNbPvmmMxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6mallocMFNbmkMxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6qallocMFNbmkMxC8TypeInfoZSQCn6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7reallocMFNbPvmkMxC8TypeInfoZQr@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC8rootIterMFNdNiNjZDFMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl5protoQo7ProtoGC9rangeIterMFNdNiNjZDFMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp10initializeFZCQBuQBj11gcinterface2GC@Base 12
+ _D4core8internal2gc4impl6manualQp11__moduleRefZ@Base 12
+ _D4core8internal2gc4impl6manualQp12__ModuleInfoZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10__aggrDtorMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10removeRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC10rootsApplyMFMDFNbKSQCjQBy11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11inFinalizerMFNbNiNfZb@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11rangesApplyMFMDFNbKSQCkQBz11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC11removeRangeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC12profileStatsMFNbNiNfZSQCm6memory2GC12ProfileStats@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC13runFinalizersMFNbMxAvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC14collectNoStackMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC4freeMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC5queryMFNbPvZSQCc6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC5statsMFNbNiNfZSQCe6memory2GC5Stats@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__ctorMFZCQBzQBxQBrQBrQBpQCaQBm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__dtorMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__initZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6__vtblZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6addrOfMFNbNiPvZQd@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6enableMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6qallocMFNbmkMxC8TypeInfoZSQCp6memory8BlkInfo_@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC6sizeOfMFNbNiPvZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7__ClassZ@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7addRootMFNbNiPvZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7clrAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7collectMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7disableMFZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7getAttrMFNbPvZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7reserveMFNbmZm@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC7setAttrMFNbPvkZk@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8minimizeMFNbZv@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC8rootIterMFNdNiNjZDFMDFNbKSQCpQCe11gcinterface4RootZiZi@Base 12
+ _D4core8internal2gc4impl6manualQp8ManualGC9rangeIterMFNdNiNjZDFMDFNbKSQCqQCf11gcinterface5RangeZiZi@Base 12
+ _D4core8internal2gc5proxy11__moduleRefZ@Base 12
+ _D4core8internal2gc5proxy12__ModuleInfoZ@Base 12
+ _D4core8internal2gc5proxy12instanceLockOSQBnQBl8spinlock8SpinLock@Base 12
+ _D4core8internal2gc5proxy14isInstanceInitb@Base 12
+ _D4core8internal2gc5proxy8instanceFNbNiNeZCQBpQBe11gcinterface2GC@Base 12
+ _D4core8internal2gc5proxy9_instanceCQBiQx11gcinterface2GC@Base 12
+ _D4core8internal2gc5proxy9proxiedGCCQBiQx11gcinterface2GC@Base 12
+ _D4core8internal2gc9pooltable11__moduleRefZ@Base 12
+ _D4core8internal2gc9pooltable12__ModuleInfoZ@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx4DtorMFNbNiZv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6insertMFNbNiPQCdZb@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6lengthMxFNaNbNdNiNlNfZm@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7maxAddrMxFNaNbNdNiNfZPxv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7minAddrMxFNaNbNdNiNfZPxv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opIndexMNgFNaNbNcNiNjNemZNgPSQEiQEgQEaQCsQCqQEjQCf@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opSliceMNgFNaNbNiNjNeZANgPSQEgQEeQDyQCqQCoQEhQCd@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx7opSliceMNgFNaNbNiNjNemmZANgPSQEiQEgQEaQCsQCqQEjQCf@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8findPoolMFNaNbNiPvZPQCk@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8minimizeMFNaNbNiZ4swapFNaNbNiNfKPQCxKQfZv@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx8minimizeMFNaNbNiZAPQCj@Base 12
+ _D4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx9InvariantMxFNaNbNiZv@Base 12
+ _D4core8internal3utf10UTF8strideyAi@Base 12
+ _D4core8internal3utf10toUCSindexFNaNbNiNfMxAwmZm@Base 12
+ _D4core8internal3utf10toUCSindexFNaNfMxAamZm@Base 12
+ _D4core8internal3utf10toUCSindexFNaNfMxAumZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNbNiNfMxAumZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNbNiNfMxAwmZm@Base 12
+ _D4core8internal3utf10toUTFindexFNaNfMxAamZm@Base 12
+ _D4core8internal3utf11__moduleRefZ@Base 12
+ _D4core8internal3utf12__ModuleInfoZ@Base 12
+ _D4core8internal3utf12isValidDcharFNaNbNiNfwZb@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAaKmZw@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAuKmZw@Base 12
+ _D4core8internal3utf6decodeFNaNfMxAwKmZw@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAawZv@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAuwZv@Base 12
+ _D4core8internal3utf6encodeFNaNbNfKAwwZv@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAamZk@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAumZk@Base 12
+ _D4core8internal3utf6strideFNaNbNiNfMxAwmZk@Base 12
+ _D4core8internal3utf6toUTF8FNaNbNfNkMAyaZQe@Base 12
+ _D4core8internal3utf6toUTF8FNaNbNiNfNkMAawZQe@Base 12
+ _D4core8internal3utf6toUTF8FNaNeMxAuZAya@Base 12
+ _D4core8internal3utf6toUTF8FNaNeMxAwZAya@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNeMxAwZAyu@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNfNkMAyuZQe@Base 12
+ _D4core8internal3utf7toUTF16FNaNbNiNfNkMAuwZQe@Base 12
+ _D4core8internal3utf7toUTF16FNaNeMxAaZAyu@Base 12
+ _D4core8internal3utf7toUTF32FNaNbNfNkMAywZQe@Base 12
+ _D4core8internal3utf7toUTF32FNaNeMxAaZAyw@Base 12
+ _D4core8internal3utf7toUTF32FNaNeMxAuZAyw@Base 12
+ _D4core8internal3utf8toUTF16zFNaNfMxAaZPxu@Base 12
+ _D4core8internal3utf__T8validateTAyaZQoFNaNfMxAyaZv@Base 12
+ _D4core8internal3utf__T8validateTAyuZQoFNaNfMxAyuZv@Base 12
+ _D4core8internal3utf__T8validateTAywZQoFNaNfMxAywZv@Base 12
+ _D4core8internal4hash11__moduleRefZ@Base 12
+ _D4core8internal4hash12__ModuleInfoZ@Base 12
+ _D4core8internal4hash__T13coalesceFloatTdZQsFNaNbNiNfxdZd@Base 12
+ _D4core8internal4hash__T13coalesceFloatTeZQsFNaNbNiNfxeZe@Base 12
+ _D4core8internal4hash__T13coalesceFloatTfZQsFNaNbNiNfxfZf@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility16__c_complex_realZQBuFNaNbNiNfQBymZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility17__c_complex_floatZQBvFNaNbNiNfQBzmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxE2rt4util7utility18__c_complex_doubleZQBwFNaNbNiNfQCamZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxPvZQnFNaNbNiNfMxAQrmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxPyS6object10ModuleInfoZQBhFNaNbNiNfMxAQBmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxS3std5regexQBm2ir__T5GroupTmZQjZQBqFNaNbNiNfMxAQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxaZQmFNaNbNiNfMxAamZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxdZQmFNaNbNiNfMxAdmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxeZQmFNaNbNiNfMxAemZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxfZQmFNaNbNiNfMxAfmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxhZQmFNaNbNiNfMxAhmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxkZQmFNaNbNiNfMxAkmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxmZQmFNaNbNiNfMxAmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxtZQmFNaNbNiNfMxAtmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAxvZQmFNaNbNiNfMxAvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTAyaZQmFNaNbNiNfMxAyamZm@Base 12
+ _D4core8internal4hash__T6hashOfTAykZQmFNaNbNiNfMxAykmZm@Base 12
+ _D4core8internal4hash__T6hashOfTDFZvZQnFNaNbNiNeMxDQsmZm@Base 12
+ _D4core8internal4hash__T6hashOfTE3std3uni__T16UnicodeSetParserTSQBf5regexQCo6parser__T6ParserTAyaTSQCoQBjQDuQBg7CodeGenZQBiZQDc8OperatorZQEjFNaNbNiNexEQEoQEn__TQEmTQDxZQEuQBsZm@Base 12
+ _D4core8internal4hash__T6hashOfTE3std8encoding3BOMZQBbFNaNbNiNexEQBgQBfQzZm@Base 12
+ _D4core8internal4hash__T6hashOfTG2kZQmFNaNbNiNfKxG2kmZm@Base 12
+ _D4core8internal4hash__T6hashOfTPvZQlFNaNbNiNeMxPvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTPxG32hZQpFNaNbNiNeMxPQtZm@Base 12
+ _D4core8internal4hash__T6hashOfTPxS3std11concurrency3TidZQBhFNaNbNiNeMxPQBmZm@Base 12
+ _D4core8internal4hash__T6hashOfTPxvZQmFNaNbNiNeMxPvZm@Base 12
+ _D4core8internal4hash__T6hashOfTPxvZQmFNaNbNiNeMxPvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTdZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTeZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS2rt4util7utility__T8_ComplexTfZQmZQBrFNaNbNiNfQBvmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDiFNaNbNiNfKxSQDoQDn__TQDlVii10TaVQDgi1TiZQEeFNaNbNiNfiZQCnmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS3std5range__T10OnlyResultTaZQpZQBoFNaNbNiNfKxSQBuQBt__TQBqTaZQBwmZm@Base 12
+ _D4core8internal4hash__T6hashOfTS3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBbZQCbFNaNbNiNfKxSQChQCg__TQCdTQCbZQClmZm@Base 12
+ _D4core8internal4hash__T6hashOfTaZQkFNaNbNiNexaZm@Base 12
+ _D4core8internal4hash__T6hashOfTbZQkFNaNbNiNexbZm@Base 12
+ _D4core8internal4hash__T6hashOfTbZQkFNaNbNiNexbmZm@Base 12
+ _D4core8internal4hash__T6hashOfTdZQkFNaNbNiNexdZm@Base 12
+ _D4core8internal4hash__T6hashOfTdZQkFNaNbNiNexdmZm@Base 12
+ _D4core8internal4hash__T6hashOfTeZQkFNaNbNiNexeZm@Base 12
+ _D4core8internal4hash__T6hashOfTeZQkFNaNbNiNexemZm@Base 12
+ _D4core8internal4hash__T6hashOfTfZQkFNaNbNiNexfZm@Base 12
+ _D4core8internal4hash__T6hashOfTfZQkFNaNbNiNexfmZm@Base 12
+ _D4core8internal4hash__T6hashOfThZQkFNaNbNiNexhZm@Base 12
+ _D4core8internal4hash__T6hashOfTiZQkFNaNbNiNexiZm@Base 12
+ _D4core8internal4hash__T6hashOfTkZQkFNaNbNiNexkZm@Base 12
+ _D4core8internal4hash__T6hashOfTkZQkFNaNbNiNexkmZm@Base 12
+ _D4core8internal4hash__T6hashOfTmZQkFNaNbNiNexmZm@Base 12
+ _D4core8internal4hash__T6hashOfTmZQkFNaNbNiNexmmZm@Base 12
+ _D4core8internal4hash__T6hashOfTtZQkFNaNbNiNextZm@Base 12
+ _D4core8internal4hash__T6hashOfTuZQkFNaNbNiNexuZm@Base 12
+ _D4core8internal4hash__T6hashOfTxC15TypeInfo_StructZQBcFNbNfxQBcZm@Base 12
+ _D4core8internal4hash__T6hashOfTxC8TypeInfoZQuFNbNfxQtZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility16__c_complex_realZQBtFNaNbNiNfKxQBymZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility17__c_complex_floatZQBuFNaNbNiNfKxQBzmZm@Base 12
+ _D4core8internal4hash__T6hashOfTxE2rt4util7utility18__c_complex_doubleZQBvFNaNbNiNfKxQCamZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std3uni21DecompressedIntervalsZQBqFNaNbNiNfKxQBvZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBhZQCgFNaNbNiNfKxQClZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCiFNaNbNiNfKxQCnZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfKxQCoZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCjFNaNbNiNfKxQCoZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda1TSQDn5range__T4iotaTmTmZQkFmmZ6ResultZQDwZQFlFNaNbNiNfKxQFqZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T12FilterResultSQBq8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda3TSQDn5range__T4iotaTmTxmZQlFmxmZ6ResultZQDyZQFnFNaNbNiNfKxQFsZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T6joinerTSQBkQBjQBc__T9MapResultSQCh8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQEeQEdQDw__T12FilterResultSQFfQCyQCsQCmMxFNbNdZ9__lambda1TSQGl5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHcFQGyZQyZQIyFNaNbNiNfKxQJdZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b305dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDuZQFjFNaNbNiNfKxQFoZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T9MapResultSQBm10functional__T8unaryFunVAyaa4_615b315dVQpa1_61ZQBhTSQDq3uni21DecompressedIntervalsZQDuZQFjFNaNbNiNfKxQFoZm@Base 12
+ _D4core8internal4hash__T6hashOfTxS3std9algorithm9iteration__T9MapResultSQBm8bitmanip8BitArray7bitsSetMxFNbNdZ9__lambda2TSQDjQDiQDb__T12FilterResultSQEkQCyQCsQCmMxFNbNdZ9__lambda1TSQFq5range__T4iotaTmTmZQkFmmZ6ResultZQDfZQGdZQHsFNaNbNiNfKxQHxZm@Base 12
+ _D4core8internal4hash__T9bytesHashVbi0ZQpFNaNbNiNeMAxhmZm@Base 12
+ _D4core8internal4hash__T9bytesHashVbi1ZQpFNaNbNiNeMAxhmZm@Base 12
+ _D4core8internal4hash__T9get32bitsZQlFNaNbNiMPxhZk@Base 12
+ _D4core8internal4util4math11__moduleRefZ@Base 12
+ _D4core8internal4util4math12__ModuleInfoZ@Base 12
+ _D4core8internal4util4math__T3maxTmZQhFNaNbNiNfmmZm@Base 12
+ _D4core8internal4util4math__T3minTkZQhFNaNbNiNfkkZk@Base 12
+ _D4core8internal4util5array10arrayToPtrFNbNexAvZm@Base 12
+ _D4core8internal4util5array11__moduleRefZ@Base 12
+ _D4core8internal4util5array12__ModuleInfoZ@Base 12
+ _D4core8internal4util5array17_enforceNoOverlapFNbNfxAammxmZv@Base 12
+ _D4core8internal4util5array18_enforceSameLengthFNbNfxAaxmxmZv@Base 12
+ _D4core8internal4util5array21_enforceNoOverlapNogcFNbNfKxAammxmZv@Base 12
+ _D4core8internal4util5array22_enforceSameLengthNogcFNbNfKxAaxmxmZv@Base 12
+ _D4core8internal4util5array27enforceRawArraysConformableFNbNfxAaxmxAvxQdxbZv@Base 12
+ _D4core8internal4util5array31enforceRawArraysConformableNogcFNbNfxAaxmxAvxQdxbZv@Base 12
+ _D4core8internal4util5array6_storeG256a@Base 12
+ _D4core8internal4util5array__T12errorMessageTxmTxmZQvFNbNiNeMxPaxAaxmxmZAa@Base 12
+ _D4core8internal5abort11__moduleRefZ@Base 12
+ _D4core8internal5abort12__ModuleInfoZ@Base 12
+ _D4core8internal5abortQgFNbNiNfMAyaMQemZ8writeStrFNbNiNeMAAxaXv@Base 12
+ _D4core8internal5abortQgFNbNiNfMAyaMQemZv@Base 12
+ _D4core8internal5array10comparison11__moduleRefZ@Base 12
+ _D4core8internal5array10comparison12__ModuleInfoZ@Base 12
+ _D4core8internal5array10comparison__T5__cmpTaZQjFNaNbNiNeMxAaMxQeZi@Base 12
+ _D4core8internal5array10comparison__T5__cmpThZQjFNaNbNiNeMxAhMxQeZi@Base 12
+ _D4core8internal5array10operations10isBinaryOpFNaNbNiNfMAyaZb@Base 12
+ _D4core8internal5array10operations11__moduleRefZ@Base 12
+ _D4core8internal5array10operations12__ModuleInfoZ@Base 12
+ _D4core8internal5array10operations16isBinaryAssignOpFAyaZb@Base 12
+ _D4core8internal5array10operations8toStringFmZAya@Base 12
+ _D4core8internal5array10operations9isUnaryOpFNaNbNiNfMAyaZb@Base 12
+ _D4core8internal5array12construction11__moduleRefZ@Base 12
+ _D4core8internal5array12construction12__ModuleInfoZ@Base 12
+ _D4core8internal5array13concatenation11__moduleRefZ@Base 12
+ _D4core8internal5array13concatenation12__ModuleInfoZ@Base 12
+ _D4core8internal5array5utils11__moduleRefZ@Base 12
+ _D4core8internal5array5utils11gcStatsPureFNaNbZSQBu6memory2GC5Stats@Base 12
+ _D4core8internal5array5utils12__ModuleInfoZ@Base 12
+ _D4core8internal5array5utils14accumulatePureFNaNbAyaiQeQgmZ12impureBypassFNbNiQBdiQBhQBkmZm@Base 12
+ _D4core8internal5array5utils14accumulatePureFNaNbAyaiQeQgmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAAyaS_DQCdQCbQBv8capacity__T22_d_arraysetlengthTImplHTQCcTQCfZ18_d_arraysetlengthTFNaNbNeNkMKQDpmZmVQDva79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKzFNaNbNeQKpiQKtNkMKQLbmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAC3std3zip13ArchiveMemberS_DQCyQCwQCq8capacity__T22_d_arraysetlengthTImplHTQCxTQDaZ18_d_arraysetlengthTFNaNbNeNkMKQEkmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLuFNaNbNeQGuiQGyNkMKQLwmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAC3std6socket7AddressS_DQCuQCsQCm8capacity__T22_d_arraysetlengthTImplHTQCtTQCwZ18_d_arraysetlengthTFNaNbNeNkMKQEgmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLqFNaNbNeQGuiQGyNkMKQLsmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTACQBy6thread10threadbase10ThreadBaseS_DQDjQDhQDb8capacity__T22_d_arraysetlengthTImplHTQDiTQDlZ18_d_arraysetlengthTFNaNbNeNkMKQEvmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQMfFNaNbNeQGuiQGyNkMKQMhmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOaS_DQCcQCaQBu9appending__T19_d_arrayappendTImplHTQBzTOaZ15_d_arrayappendTFNaNbNcNeMNkKQDkMQDoZQDsVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLdFNaNbNeQGyiQHcMNkKQLfMQLjZQLn@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOaS_DQCcQCaQBu9appending__T21_d_arrayappendcTXImplHTQCbTOaZ17_d_arrayappendcTXFNaNbNcNeMNkKQDomZQDtVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLeFNaNbNeQGyiQHcMNkKQLgmZQLl@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOuS_DQCcQCaQBu9appending__T19_d_arrayappendTImplHTQBzTOuZ15_d_arrayappendTFNaNbNcNeMNkKQDkMQDoZQDsVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLdFNaNbNeQGyiQHcMNkKQLfMQLjZQLn@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAOuS_DQCcQCaQBu9appending__T21_d_arrayappendcTXImplHTQCbTOuZ17_d_arrayappendcTXFNaNbNcNeMNkKQDomZQDtVAyaa81_43616e6e6f7420617070656e6420746f20617272617920696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLeFNaNbNeQGyiQHcMNkKQLgmZQLl@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAS3std3uni17CodepointIntervalS_DQDcQDaQCu8capacity__T22_d_arraysetlengthTImplHTQDbTQDeZ18_d_arraysetlengthTFNaNbNeNkMKQEomZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLyFNaNbNeQGuiQGyNkMKQMamZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAS3std5regexQCd2ir10NamedGroupS_DQDdQDbQCv8capacity__T22_d_arraysetlengthTImplHTQDcTQDfZ18_d_arraysetlengthTFNaNbNeNkMKQEpmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLzFNaNbNeQGuiQGyNkMKQMbmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAS3std5regexQCd2ir8BytecodeS_DQDaQCyQCs8capacity__T22_d_arraysetlengthTImplHTQCzTQDcZ18_d_arraysetlengthTFNaNbNeNkMKQEmmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLwFNaNbNeQGuiQGyNkMKQLymZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAS3std5regexQCd9kickstart__T7ShiftOrTaZQl11ShiftThreadS_DQEbQDzQDt8capacity__T22_d_arraysetlengthTImplHTQEaTQEdZ18_d_arraysetlengthTFNaNbNeNkMKQFnmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQMxFNaNbNeQGuiQGyNkMKQMzmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAS3std6socket11AddressInfoS_DQCzQCxQCr8capacity__T22_d_arraysetlengthTImplHTQCyTQDbZ18_d_arraysetlengthTFNaNbNeNkMKQElmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQLvFNaNbNeQGuiQGyNkMKQLxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAaS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTaZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAhS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaThZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAkS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTkZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAlS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTlZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAmS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTmZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAuS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTuZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAvS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTvZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAwS_DQCbQBzQBt8capacity__T22_d_arraysetlengthTImplHTQCaTwZ18_d_arraysetlengthTFNaNbNeNkMKQDlmZmVAyaa79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKvFNaNbNeQGuiQGyNkMKQKxmZm@Base 12
+ _D4core8internal5array5utils__T16_d_HookTraceImplTAyaS_DQCcQCaQBu8capacity__T22_d_arraysetlengthTImplHTQCbTyaZ18_d_arraysetlengthTFNaNbNeNkMKQDnmZmVQDua79_43616e6e6f7420726573697a652061727261797320696620636f6d70696c696e6720776974686f757420737570706f727420666f722072756e74696d65207479706520696e666f726d6174696f6e21ZQKxFNaNbNeQKoiQKsNkMKQKzmZm@Base 12
+ _D4core8internal5array7casting11__moduleRefZ@Base 12
+ _D4core8internal5array7casting12__ModuleInfoZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastThTuZQsFNaNbNiNeNkMAhZAu@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastThTuZQsFNaNiNeNkMAhZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastThTwZQsFNaNbNiNeNkMAhZAw@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastThTwZQsFNaNiNeNkMAhZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTvTS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4NodeZQHqFNaNbNiNeNkMAvZAQHt@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTvTS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4NodeZQHqFNaNiNeNkMAvZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTvTmZQsFNaNbNiNeNkMAvZAm@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTvTmZQsFNaNiNeNkMAvZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTxhTxuZQuFNaNbNiNeNkMAxhZAxu@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTxhTxuZQuFNaNiNeNkMAxhZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTxhTxwZQuFNaNbNiNeNkMAxhZAxw@Base 12
+ _D4core8internal5array7casting__T11__ArrayCastTxhTxwZQuFNaNiNeNkMAxhZ5Array6__initZ@Base 12
+ _D4core8internal5array7casting__T16onArrayCastErrorZQtFNaNbNiNeAyammQfmZv@Base 12
+ _D4core8internal5array8capacity11__moduleRefZ@Base 12
+ _D4core8internal5array8capacity12__ModuleInfoZ@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAAyaTQeZ18_d_arraysetlengthTFNaNbNeNkMKQBnmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAC3std3zip13ArchiveMemberTQzZ18_d_arraysetlengthTFNaNbNeNkMKQCimZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAC3std6socket7AddressTQvZ18_d_arraysetlengthTFNaNbNeNkMKQCemZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTACQCi6thread10threadbase10ThreadBaseTQBkZ18_d_arraysetlengthTFNaNbNeNkMKQCumZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAS3std3uni17CodepointIntervalTQBdZ18_d_arraysetlengthTFNaNbNeNkMKQCnmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAS3std5regexQCn2ir10NamedGroupTQBeZ18_d_arraysetlengthTFNaNbNeNkMKQComZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAS3std5regexQCn2ir8BytecodeTQBbZ18_d_arraysetlengthTFNaNbNeNkMKQClmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAS3std5regexQCn9kickstart__T7ShiftOrTaZQl11ShiftThreadTQCcZ18_d_arraysetlengthTFNaNbNeNkMKQDmmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAS3std6socket11AddressInfoTQBaZ18_d_arraysetlengthTFNaNbNeNkMKQCkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAaTaZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAhThZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAkTkZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAlTlZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAmTmZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAuTuZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAvTvZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAwTwZ18_d_arraysetlengthTFNaNbNeNkMKQBkmZm@Base 12
+ _D4core8internal5array8capacity__T22_d_arraysetlengthTImplHTAyaTyaZ18_d_arraysetlengthTFNaNbNeNkMKQBmmZm@Base 12
+ _D4core8internal5array8equality11__moduleRefZ@Base 12
+ _D4core8internal5array8equality12__ModuleInfoZ@Base 12
+ _D4core8internal5array8equality__T8__equalsTE3std3uni__T16UnicodeSetParserTSQBf5regexQDa6parser__T6ParserTAyaTSQCoQBjQEgQBg7CodeGenZQBiZQDc8OperatorTQEbZQEpFNaNbNiNeMxAEQEuQEt__TQEsTQEdZQFaQByMxQBbZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTPxvTQeZQrFNaNbNiNeMxAPvMxQfZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTPyS6object10ModuleInfoTQxZQBkFNaNbNiNeMxAPyQBpMxQiZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTS3std5regexQBw2ir__T5GroupTmZQjTQBgZQBuFNaNbNiNfMAQByMQfZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTaTaZQoFNaNbNiNeMxAaMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTdTdZQoFNaNbNiNeMxAdMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTeTeZQoFNaNbNiNeMxAeMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTfTfZQoFNaNbNiNeMxAfMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsThThZQoFNaNbNiNeMxAhMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTiTiZQoFNaNbNiNeMxAiMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTkTkZQoFNaNbNiNeMxAkMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTmTmZQoFNaNbNiNeMxAmMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTtTtZQoFNaNbNiNeMxAtMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTuTuZQoFNaNbNiNeMxAuMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTwTwZQoFNaNbNiNeMxAwMxQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxAS3std3uni17CodepointIntervalTxQBfZQBuFNaNbNiNfMAxQByMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxAyaTxQfZQtFNaNbNiNfMAxQwMQfZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxC3std3zip13ArchiveMemberTxQBaZQBpFMAxQBlMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxDFNbC6ObjectZvTxQqZQBeFNaNbNiNfMAxQBiMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility16__c_complex_realTxQBlZQCaFNaNbNiNfMAxQCeMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility17__c_complex_floatTxQBmZQCbFNaNbNiNfMAxQCfMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxE2rt4util7utility18__c_complex_doubleTxQBnZQCcFNaNbNiNfMAxQCgMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS2rt3aaA6BucketTxQrZQBfFNaNbNiNfMAxQBjMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std12experimental9allocator15building_blocks14allocator_list__T13AllocatorListTSQDdQDcQCr8showcase14mmapRegionListFmZ7FactoryTSQEyQExQEmQEf14null_allocator13NullAllocatorZQEe4NodeTxQHbZQHqFNaNbNiNfMAxQHuMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std3uni17CodepointIntervalTxQBeZQBtFNaNbNiNfMAxQBxMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBhTxQByZQCnFNaNbNiNfMAxQCrMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std4file15DirIteratorImpl9DirHandleTxQBnZQCcFNaNbNiNfMAxQCgMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std4file8DirEntryTxQvZQBjFNaNbNiNfMAxQBnMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std4json9JSONValueTxQwZQBkFNaNbNiNfMAxQBoMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std5regexQBx2ir10NamedGroupTxQBfZQBuFNaNbNiNfMAxQByMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std5regexQBx2ir11CharMatcherTxQBgZQBvFNaNbNiNfMAxQBzMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std5regexQBx2ir8BitTableTxQBcZQBrFNaNbNiNfMAxQBvMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std5regexQBx2ir8BytecodeTxQBcZQBrFNaNbNiNfMAxQBvMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std5regexQBx2ir__T5GroupTmZQjTxQBhZQBwFNaNbNiNfMAxQCaMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std6getopt6OptionTxQvZQBjFNaNbNiNfMAxQBnMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std6socket11AddressInfoTxQBbZQBqFMAxQBmMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std8datetime8timezone13PosixTimeZone10LeapSecondTxQCaZQCpFNaNbNiNfMAxQCtMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std8datetime8timezone13PosixTimeZone10TransitionTxQCaZQCpFNaNbNiNfMAxQCtMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std8datetime8timezone13PosixTimeZone14TempTransitionTxQCeZQCtFNaNbNiNfMAxQCxMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxS3std8typecons__T5TupleTkTkTkZQnTxQBiZQBxFNaNbNiNfMAxQCbMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxSQBs8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11ReplacementTxQCvZQDkFNaNbNiNfMAxQDoMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTxvTxvZQqFNaNbNiNfMAxvMQeZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTyS3stdQBr14unicode_tables15UnicodePropertyTyQBrZQCgFNaNbNiNfMAyQCkMQgZb@Base 12
+ _D4core8internal5array8equality__T8__equalsTyS3stdQBr14unicode_tables9CompEntryTyQBkZQBzFNaNbNiNfMAyQCdMQgZb@Base 12
+ _D4core8internal5array9appending11__moduleRefZ@Base 12
+ _D4core8internal5array9appending12__ModuleInfoZ@Base 12
+ _D4core8internal5array9appending__T19_d_arrayappendTImplHTAOaTOaZ15_d_arrayappendTFNaNbNcNeMNkKQBlMQBpZQBt@Base 12
+ _D4core8internal5array9appending__T19_d_arrayappendTImplHTAOuTOuZ15_d_arrayappendTFNaNbNcNeMNkKQBlMQBpZQBt@Base 12
+ _D4core8internal5array9appending__T21_d_arrayappendcTXImplHTAOaTOaZ17_d_arrayappendcTXFNaNbNcNeMNkKQBnmZQBs@Base 12
+ _D4core8internal5array9appending__T21_d_arrayappendcTXImplHTAOuTOuZ17_d_arrayappendcTXFNaNbNcNeMNkKQBnmZQBs@Base 12
+ _D4core8internal5qsort11__moduleRefZ@Base 12
+ _D4core8internal5qsort12__ModuleInfoZ@Base 12
+ _D4core8internal5qsort7_adSortUNkMAvC8TypeInfoZ3cmpUMxPvMxQeMPvZi@Base 12
+ _D4core8internal6atomic11__moduleRefZ@Base 12
+ _D4core8internal6atomic12__ModuleInfoZ@Base 12
+ _D4core8internal6atomic12simpleFormatFAyaMAQfZQi@Base 12
+ _D4core8internal6atomic5pauseFNaNbNiNeZv@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi0TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNePNgPONgSQCcQCcQBvZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi0TbZQBmFNaNbNiNePNgbZNgb@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi0TmZQBmFNaNbNiNePNgmZNgm@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TE3std12experimental6loggerQDi8LogLevelZQCxFNaNbNiNePNgEQCcQCbQBqQEuQBmZNgQt@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TOC3std12experimental6loggerQDj6LoggerZQCwFNaNbNiNePONgCQCbQCaQBpQEuQBlZONgQu@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TOCQCk4sync5mutex5MutexZQChFNaNbNiNePONgCQDwQBmQBkQBhZONgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TPOS2rt8monitor_7MonitorZQCiFNaNbNiNePNgPONgSQBpQBpQBjZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TPOS2rt9critical_18D_CRITICAL_SECTIONZQCvFNaNbNiNePNgPONgSQCcQCcQBvZNgQr@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TbZQBmFNaNbNiNePNgbZNgb@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi2TkZQBmFNaNbNiNePNgkZNgk@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateZQCwFNaNbNiNePNgEQCbQCaQBqQBkZNgQq@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TbZQBmFNaNbNiNePNgbZNgb@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5ThZQBmFNaNbNiNePNghZNgh@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TiZQBmFNaNbNiNePNgiZNgi@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TkZQBmFNaNbNiNePNgkZNgk@Base 12
+ _D4core8internal6atomic__T10atomicLoadVEQBmQBb11MemoryOrderi5TmZQBmFNaNbNiNePNgmZNgm@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi0TbZQBnFNaNbNiNePbbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi0TmZQBnFNaNbNiNePmmZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TC3std12experimental6loggerQDj6LoggerZQCwFNaNbNiNePQByQCbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TE3std12experimental6loggerQDj8LogLevelZQCyFNaNbNiNePQCaQCdZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TPOS2rt8monitor_7MonitorZQCjFNaNbNiNePQBlQBoZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TPOS2rt9critical_18D_CRITICAL_SECTIONZQCwFNaNbNiNePQByQCbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TbZQBnFNaNbNiNePbbZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi3TmZQBnFNaNbNiNePmmZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi5TE3std11parallelism8TaskPool9PoolStateZQCxFNaNbNiNePQBzQCcZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi5ThZQBnFNaNbNiNePhhZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi5TkZQBnFNaNbNiNePkkZv@Base 12
+ _D4core8internal6atomic__T11atomicStoreVEQBnQBc11MemoryOrderi5TmZQBnFNaNbNiNePmmZv@Base 12
+ _D4core8internal6atomic__T14atomicFetchAddVEQBqQBf11MemoryOrderi5Vbi1TkZQBuFNaNbNiNePkkZk@Base 12
+ _D4core8internal6atomic__T14atomicFetchAddVEQBqQBf11MemoryOrderi5Vbi1TmZQBuFNaNbNiNePmmZm@Base 12
+ _D4core8internal6atomic__T14atomicFetchSubVEQBqQBf11MemoryOrderi5Vbi1TkZQBuFNaNbNiNePkkZk@Base 12
+ _D4core8internal6atomic__T14atomicFetchSubVEQBqQBf11MemoryOrderi5Vbi1TmZQBuFNaNbNiNePmmZm@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TCQDh4sync5mutex5MutexZQDeFNaNbNiNePQBjQeQBoZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TE3std11parallelism8TaskPool9PoolStateZQDuFNaNbNiNePQBzQeQCeZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TbZQCkFNaNbNiNePbQcbZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0ThZQCkFNaNbNiNePhQchZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TmZQCkFNaNbNiNePmQcmZb@Base 12
+ _D4core8internal6atomic__T25atomicCompareExchangeImplVEQCbQBq11MemoryOrderi5VQxi5Vbi0TtZQCkFNaNbNiNePtQctZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TCQDn4sync5mutex5MutexZQDkFNaNbNiNePQBjxCQFaQBnQBlQBiQCaZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TE3std11parallelism8TaskPool9PoolStateZQEaFNaNbNiNePQBzxEQCdQCcQBsQBmQCqZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TbZQCqFNaNbNiNePbxbbZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5ThZQCqFNaNbNiNePhxhhZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TmZQCqFNaNbNiNePmxmmZb@Base 12
+ _D4core8internal6atomic__T35atomicCompareExchangeStrongNoResultVEQClQCa11MemoryOrderi5VQxi5TtZQCqFNaNbNiNePtxttZb@Base 12
+ _D4core8internal6moving11__moduleRefZ@Base 12
+ _D4core8internal6moving12__ModuleInfoZ@Base 12
+ _D4core8internal6string11__moduleRefZ@Base 12
+ _D4core8internal6string12__ModuleInfoZ@Base 12
+ _D4core8internal6string__T17TempStringNoAllocVhi20ZQz3getMNgFNaNbNiNjNfZANga@Base 12
+ _D4core8internal6string__T17TempStringNoAllocVhi20ZQz6__initZ@Base 12
+ _D4core8internal6string__T18signedToTempStringVki10ZQBaFNaNbNiNflZSQCnQClQCf__T17TempStringNoAllocVhi20ZQz@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVii10ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVii16ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVki10ZQBcFNaNbNiNfmNkMAaZQd@Base 12
+ _D4core8internal6string__T20unsignedToTempStringVki10ZQBcFNaNbNiNfmZSQCpQCnQCh__T17TempStringNoAllocVhi20ZQz@Base 12
+ _D4core8internal6string__T7dstrcmpZQjFNaNbNiNeMxAaMxQeZi@Base 12
+ _D4core8internal6string__T9numDigitsVki10ZQqFNaNbNiNfmZi@Base 12
+ _D4core8internal6traits11__moduleRefZ@Base 12
+ _D4core8internal6traits12__ModuleInfoZ@Base 12
+ _D4core8internal6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D4core8internal7convert10ctfe_allocFNaNbNiNemZ5allocFNaNbNfmZAh@Base 12
+ _D4core8internal7convert10ctfe_allocFNaNbNiNemZAh@Base 12
+ _D4core8internal7convert11__moduleRefZ@Base 12
+ _D4core8internal7convert11shiftrRoundFNaNbNiNfmZm@Base 12
+ _D4core8internal7convert12__ModuleInfoZ@Base 12
+ _D4core8internal7convert5Float6__initZ@Base 12
+ _D4core8internal7convert7binPow2FNaNbNiNfiZ10binPosPow2FNaNbNiNfiZe@Base 12
+ _D4core8internal7convert7binPow2FNaNbNiNfiZe@Base 12
+ _D4core8internal7convert__T20denormalizedMantissaTeZQzFNaNbNiNfekZSQCnQClQCf5Float@Base 12
+ _D4core8internal7convert__T5parseVbi0HTeZQoFNaNbNiNfeZSQCbQBzQBt5Float@Base 12
+ _D4core8internal7convert__T5parseVbi0HTxeZQpFNaNbNiNfxeZSQCdQCbQBv5Float@Base 12
+ _D4core8internal7convert__T7binLog2TeZQlFNaNbNiNfxeZk@Base 12
+ _D4core8internal7convert__T7toUbyteTPxvZQnFNaNbNiNeNkMxAPvZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTPyS6object10ModuleInfoZQBgFNaNbNiNeNkMxAPyQBoZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6ResultZQDjFNaNbNiNeNkKxSQDqQDp__TQDnVii10TaVQDii1TiZQEgFNaNbNiNfiZQCpZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5range__T10OnlyResultTaZQpZQBpFNaNbNiNeNkKxSQBwQBv__TQBsTaZQByZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBbZQCcFNaNbNiNeNkKxSQCjQCi__TQCfTQCdZQCnZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5range__T4iotaTmTmZQkFmmZ6ResultZQBvFNaNbNiNeNkKxSQCcQCb__TQByTmTmZQCgFmmZQBxZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5range__T4iotaTmTxmZQlFmxmZ6ResultZQBxFNaNbNiNeNkKxSQCeQCd__TQCaTmTxmZQCjFmxmZQBzZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5range__T6RepeatTaZQkZQBkFNaNbNiNeNkKxSQBrQBq__TQBnTaZQBtZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5regexQBo2ir__T5GroupTmZQjZQBpFNaNbNiNeNkKxSQBwQBvQDgQBs__TQBsTmZQByZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std5regexQBo2ir__T5GroupTmZQjZQBpFNaNbNiNeNkMxASQBxQBwQDhQBt__TQBtTmZQBzZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTS3std8bitmanip__T7BitsSetTmZQlZQBoFNaNbNiNeNkKxSQBvQBu__TQBoTmZQBuZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTaZQlFNaNbNiNeKxaZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTaZQlFNaNbNiNeNkMxAaZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTbZQlFNaNbNiNeKxbZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTeZQlFNaNbNiNeKxeZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteThZQlFNaNbNiNeNkMxAhZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTkZQlFNaNbNiNeKxkZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTkZQlFNaNbNiNeNkMxAkZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTmZQlFNaNbNiNeKxmZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTmZQlFNaNbNiNeNkMxAmZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTtZQlFNaNbNiNeKxtZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTtZQlFNaNbNiNeNkMxAtZAxh@Base 12
+ _D4core8internal7convert__T7toUbyteTvZQlFNaNbNiNeNkMxAvZAxh@Base 12
+ _D4core8internal7dassert11__moduleRefZ@Base 12
+ _D4core8internal7dassert12__ModuleInfoZ@Base 12
+ _D4core8internal7dassert15invertCompTokenFNaNbNiNfMAyaZQe@Base 12
+ _D4core8internal7dassert16calcFieldOverlapFMxAmZAb@Base 12
+ _D4core8internal7dassert7combineFNaNbNiNfMxAAyaMxQfMxQkZ11formatTupleFNaNbNiNfMAaKmIQBpIbZv@Base 12
+ _D4core8internal7dassert7combineFNaNbNiNfMxAAyaMxQfMxQkZAya@Base 12
+ _D4core8internal7dassert9pureAllocFNaNbNiNfmZAh@Base 12
+ _D4core8internal7dassert9pureAllocFmZ5allocFNaNbNfmZAh@Base 12
+ _D4core8internal7dassert__T20assumeFakeAttributesTPFNaNbNfmZAhZQBkFNaNbNiNeQzZPFNaNbNiNfmZQBe@Base 12
+ _D4core8internal7switch_11__moduleRefZ@Base 12
+ _D4core8internal7switch_12__ModuleInfoZ@Base 12
+ _D4core8internal7switch___T14__switchSearchTyaZQuFNaNbNiNfMxAAyaMxQfZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia5_61626f7274VxQza5_7072696e74VxQBqa6_69676e6f7265VxQCka9_646570726563617465ZQDxFNaNbNiNfMxQDxZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia5_61626f7274ZQBmFNaNbNiNfMxQBmZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia7_70726563697365VxQBda12_636f6e736572766174697665ZQCxFNaNbNiNfMxQCxZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia8_72756e2d6d61696eVxQBfa9_746573742d6f6e6c79VxQCfa12_746573742d6f722d6d61696eZQDzFNaNbNiNfMxQDzZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_VxQia8_72756e2d6d61696eZQBsFNaNbNiNfMxQBsZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa0_ZQvFNaNbNiNfMxQuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa12_636f6e736572766174697665ZQBuFNaNbNiNfMxQBuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa12_746573742d6f722d6d61696eZQBuFNaNbNiNfMxQBuZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa18_5275737369612054696d65205a6f6e652033VxQBta19_5275737369612054696d65205a6f6e65203130VxQDoa19_5275737369612054696d65205a6f6e65203131VxQFja21_42656c61727573205374616e646172642054696d65ZQHvFNaNbNiNfMxQHvZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa18_5275737369612054696d65205a6f6e652033VxQBta19_5275737369612054696d65205a6f6e65203130ZQEbFNaNbNiNfMxQEbZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa18_5275737369612054696d65205a6f6e652033ZQCgFNaNbNiNfMxQCgZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa1_3cVxQka1_3eVxQta2_213dVxQBea2_3c3dVxQBqa2_3d3dVxQCca2_3e3dVxQCoa2_696eVxQDaa2_6973VxQDma3_21696eVxQEaa3_216973ZQFbFNaNbNiNfMxQFbZ5casesyG10Aa@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa1_3cVxQka1_3eVxQta2_213dVxQBea2_3c3dVxQBqa2_3d3dVxQCca2_3e3dVxQCoa2_696eVxQDaa2_6973VxQDma3_21696eVxQEaa3_216973ZQFbFNaNbNiNfMxQFbZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa21_42656c61727573205374616e646172642054696d65ZQCmFNaNbNiNfMxQCmZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa2_6763VxQma4_666f726bVxQBba7_636c65616e7570VxQBxa7_64697361626c65VxQCta7_70726f66696c65VxQDpa8_706172616c6c656cVxQEna11_696e63506f6f6c53697a65VxQFsa11_696e697452657365727665VxQGxa11_6d6178506f6f6c53697a65VxQIca11_6d696e506f6f6c53697a65VxQJha14_6865617053697a65466163746f72ZQLfFNaNbNiNfMxQLfZ5casesyG11Aa@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa2_6763VxQma4_666f726bVxQBba7_636c65616e7570VxQBxa7_64697361626c65VxQCta7_70726f66696c65VxQDpa8_706172616c6c656cVxQEna11_696e63506f6f6c53697a65VxQFsa11_696e697452657365727665VxQGxa11_6d6178506f6f6c53697a65VxQIca11_6d696e506f6f6c53697a65VxQJha14_6865617053697a65466163746f72ZQLfFNaNbNiNfMxQLfZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa3_726566VxQoa4_70757265VxQBda5_406c697665VxQBva5_406e6f6763VxQCna5_4073616665VxQDfa5_636f6e7374VxQDxa5_696e6f7574VxQEpa5_73636f7065VxQFha6_72657475726eVxQGba6_736861726564VxQGva7_4073797374656dVxQHra7_6e6f7468726f77VxQIna8_4074727573746564VxQJla9_4070726f7065727479VxQKla9_696d6d757461626c65ZQLyFNaNbNiNfMxQLyZ5casesyG15Aa@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa3_726566VxQoa4_70757265VxQBda5_406c697665VxQBva5_406e6f6763VxQCna5_4073616665VxQDfa5_636f6e7374VxQDxa5_696e6f7574VxQEpa5_73636f7065VxQFha6_72657475726eVxQGba6_736861726564VxQGva7_4073797374656dVxQHra7_6e6f7468726f77VxQIna8_4074727573746564VxQJla9_4070726f7065727479VxQKla9_696d6d757461626c65ZQLyFNaNbNiNfMxQLyZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa4_6e6f6e65VxQqa7_636f6c6c656374VxQBla8_66696e616c697a65ZQCwFNaNbNiNfMxQCwZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa4_6e6f6e65ZQBdFNaNbNiNfMxQBdZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa6_69676e6f7265VxQua9_646570726563617465ZQCgFNaNbNiNfMxQCgZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa6_69676e6f7265ZQBhFNaNbNiNfMxQBhZi@Base 12
+ _D4core8internal7switch___T8__switchTyaVxAyaa8_66696e616c697a65ZQBlFNaNbNiNfMxQBlZi@Base 12
+ _D4core8internal7switch___T8__switchTyaZQnFNaNbNiNfMxAyaZi@Base 12
+ _D4core8internal8lifetime11__moduleRefZ@Base 12
+ _D4core8internal8lifetime12__ModuleInfoZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1S11__xopEqualsMxFKxSQDcQDaQCu__TQCoTQCfTQCjTQCnZQDeFKQCwKQDaZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1S9__xtoHashFNbNeKxSQDbQCzQCt__TQCnTQCeTQCiTQCmZQDdFKQCvKQCzZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1S__T6__ctorZQiMFNaNbNcNiNfKQBwZSQDoQDmQDg__TQDaTQCrTQCvTQCzZQDqFKQDiKQDmZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFNaNbNiNfKQwKQzZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1S11__xopEqualsMxFKxSQEbQDzQDt__TQDnTQDeTQDiTQDmZQEdFKQDvKQDzZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1S9__xtoHashFNbNeKxSQEaQDyQDs__TQDmTQDdTQDhTQDlZQEcFKQDuKQDyZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1S__T6__ctorZQiMFNaNbNcNiNfKQCvZSQEnQElQEf__TQDzTQDqTQDuTQDyZQEpFKQEhKQElZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFNaNbNiNfKQBtKQBxZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1S11__xopEqualsMxFKxSQHvQHtQHn__TQHhTQGyTQHcTQHgZQHxFKQHpKQHtZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1S9__xtoHashFNbNeKxSQHuQHsQHm__TQHgTQGxTQHbTQHfZQHwFKQHoKQHsZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1S__T6__ctorZQiMFNaNbNcNiNfKQGpZSQIhQIfQHz__TQHtTQHkTQHoTQHsZQIjFKQIbKQIfZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFNaNbNiNfKQFnKQFrZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1S11__xopEqualsMxFKxSQImQIkQIe__TQHyTQHpTQHtTxQFmTQFnTQFnZQIxFKQIpKxQGiKQGjQGiZQDbZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1S9__xtoHashFNbNeKxSQIlQIjQId__TQHxTQHoTQHsTxQFlTQFmTQFmZQIwFKQIoKxQGhKQGiQGhZQDaZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1S__T6__ctorZQiMFNaNbNcNiNfKxQEvKQEwQEvZSQJgQJeQIy__TQIsTQIjTQInTxQGgTQGhTQGhZQJrFKQJjKxQHcKQHdQHcZQDv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQEi2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFNaNbNiNfKQFwKxQDpKQDqQDpZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1S11__xopEqualsMxFKxSQDzQDxQDr__TQDlTQDcTQDgTQDkZQEbFKQDtKQDxZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1S9__xtoHashFNbNeKxSQDyQDwQDq__TQDkTQDbTQDfTQDjZQEaFKQDsKQDwZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1S__T6__ctorZQiMFNaNbNcNiNfKQCtZSQElQEjQEd__TQDxTQDoTQDsTQDwZQEnFKQEfKQEjZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFNaNbNiNfKQBrKQBvZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxZQBnFNaNbNiNfKQBnKQBrZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S11__fieldDtorMFNeZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S11__xopEqualsMxFKxSQFcQFaQEu__TQEoTQEfTQEjTQDmZQFeFKQEwKQDzZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S15__fieldPostblitMFNlZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S8opAssignMFNcNjSQEzQExQEr__TQElTQEcTQEgTQDjZQFbFKQEtKQDwZQChZQBu@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S9__xtoHashFNbNeKxSQFbQEzQEt__TQEnTQEeTQEiTQDlZQFdFKQEvKQDyZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1S__T6__ctorZQiMFNcKQCnZSQFgQFeQEy__TQEsTQEjTQEnTQDqZQFiFKQFaKQEdZQCo@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqTQJiZQJzFNaNbNiNfKQJzZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3net4curl3FTP4ImplTQyZQBoFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3net4curl4HTTP4ImplTQzZQBpFNaNbNiNfKQBpZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3net4curl4SMTP4ImplTQzZQBpFNaNbNiNfKQBpZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFKQBqKQBuZ1S11__xopEqualsMxFKxSQEgQEeQDy__TQDsTQDjTQDnTQDrZQEiFKQEaKQEeZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFKQBqKQBuZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFKQBqKQBuZ1S__T6__ctorZQiMFNaNbNcNiNfKQDaZSQEsQEqQEk__TQEeTQDvTQDzTQEdZQEuFKQEmKQEqZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFNaNbNiNfKQByKQCcZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S11__fieldDtorMFNeZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S11__xopEqualsMxFKxSQFeQFcQEw__TQEqTQEhTQElTQDjTQDjTbZQFmFKQFeKQEcKQEcKbZQCwZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S8opAssignMFNcNjNeSQFdQFbQEv__TQEpTQEgTQEkTQDiTQDiTbZQFlFKQFdKQEbKQEbKbZQCvZQCg@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S9__xtoHashFNbNeKxSQFdQFbQEv__TQEpTQEgTQEkTQDiTQDiTbZQFlFKQFdKQEbKQEbKbZQCvZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1S__T6__ctorZQiMFNcNfKQCmKQCmKbZSQFqQFoQFi__TQFcTQEtTQExTQDvTQDvTbZQFyFKQFqKQEoKQEoKbZQDi@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFNfKQCkKQBiKQBiKbZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1S11__xopEqualsMxFKxSQEhQEfQDz__TQDtTQDkTQDoTQDsZQEjFKQEbKQEfZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1S9__xtoHashFNbNeKxSQEgQEeQDy__TQDsTQDjTQDnTQDrZQEiFKQEaKQEeZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1S__T6__ctorZQiMFNaNbNcNiNfKQDbZSQEtQErQEl__TQEfTQDwTQEaTQEeZQEvFKQEnKQErZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir10NamedGroupTQBeTQBiZQBzFNaNbNiNfKQBzKQCdZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir8BytecodeTQBbTQBfZQBwFKQBoKQBsZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir8BytecodeTQBbTQBfZQBwFKQBoKQBsZ1S__T6__ctorZQiMFNaNbNcNiNfKQCyZSQEqQEoQEi__TQEcTQDtTQDxTQEbZQEsFKQEkKQEoZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir8BytecodeTQBbTQBfZQBwFNaNbNiNfKQBwKQCaZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1S11__xopEqualsMxFKxSQEdQEbQDv__TQDpTQDgTQDkTQDoZQEfFKQDxKQEbZQCkZb@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1S9__xtoHashFNbNeKxSQEcQEaQDu__TQDoTQDfTQDjTQDnZQEeFKQDwKQEaZQCjZm@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1S__T6__ctorZQiMFNaNbNcNiNfKQCxZSQEpQEnQEh__TQEbTQDsTQDwTQEaZQErFKQEjKQEnZQCw@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFNaNbNiNfKQBvKQBzZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTaTaTaZQtFKaKaZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTaTaTaZQtFKaKaZ1S__T6__ctorZQiMFNaNbNcNiNfKaZSQDgQDeQCy__TQCsTaTaTaZQDcFKaKaZQCk@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTaTaTaZQtFNaNbNiNfKaKaZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTkTkTkZQtFKkKkZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTkTkTkZQtFKkKkZ1S__T6__ctorZQiMFNaNbNcNiNfKkZSQDgQDeQCy__TQCsTkTkTkZQDcFKkKkZQCk@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTkTkTkZQtFNaNbNiNfKkKkZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTwTwTwZQtFKwKwZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTwTwTwZQtFKwKwZ1S__T6__ctorZQiMFNaNbNcNiNfKwZSQDgQDeQCy__TQCsTwTwTwZQDcFKwKwZQCk@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTwTwTwZQtFNaNbNiNfKwKwZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTxaTaTxaZQvFKaKxaZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTxaTaTxaZQvFKaKxaZ1S__T6__ctorZQiMFNaNbNcNiNfKxaZSQDkQDiQDc__TQCwTxaTaTxaZQDiFKaKxaZQCo@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTxaTaTxaZQvFNaNbNiNfKaKxaZv@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTyaTaThZQuFKaKhZ1S6__initZ@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTyaTaThZQuFKaKhZ1S__T6__ctorZQiMFNaNbNcNiNfKhZSQDhQDfQCz__TQCtTyaTaThZQDeFKaKhZQCl@Base 12
+ _D4core8internal8lifetime__T10emplaceRefTyaTaThZQuFNaNbNiNfKaKhZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3gcc8sections3elf9ThreadDSOZQByFNaNbNiNeMKQBrZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std11concurrency7MessageZQBwFNaNbNiNeMKQBpZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqZQKdFNaNbNiNeMKQJwZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std3net4curl3FTP4ImplZQBtFNaNbNiNeMKQBmZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std3net4curl4HTTP4ImplZQBuFNaNbNiNeMKQBnZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std3net4curl4SMTP4ImplZQBuFNaNbNiNeMKQBnZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std3uni17CodepointIntervalZQByFNaNbNiNeKQBqZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std4file15DirIteratorImplZQBxFNaNbNiNeMKQBqZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTS3std5stdio4FileZQBmFNaNbNiNeMKQBfZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBw2gc11gcinterface4RootZQBvFNaNbNiNeMKQBoZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBw2gc11gcinterface5RangeZQBwFNaNbNiNeMKQBpZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDoFNaNbNiNeMKQDhZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDqFNaNbNiNeMKQDjZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBu9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDoFNaNbNiNeMKQDhZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1SZQCsFNaNbNiNeMKQClZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1SZQDrFNaNbNiNeMKQDkZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQFq2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTQEwZQFnFKQFfKQFjZ1SZQHlFNaNbNiNeMKQHeZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBpQFq2ir__T5RegexTaZQjVii8ZQDfFxQBsQBsZ5ValueTQEsTxQClTQCmTQCmZQFwFKQFoKxQDhKQDiQDhZ1SZQIcFNaNbNiNeMKQHvZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1SZQDpFNaNbNiNeMKQDiZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQBzTQBcZQCuFKQCmKQBpZ1SZQEsFNaNbNiNeMKQElZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std3uni17CodepointIntervalTQBdTQBhZQByFKQBqKQBuZ1SZQDwFNaNbNiNeKQDoZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std4file15DirIteratorImplTQBcTAyaTEQBkQBj8SpanModeTbZQCqFKQCiKQBgKQBgKbZ1SZQEuFNaNbNiNeMKQEnZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std5regexQDb2ir10NamedGroupTQBeTQBiZQBzFKQBrKQBvZ1SZQDxFNaNbNiNeMKQDqZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std5regexQDb2ir8BytecodeTQBbTQBfZQBwFKQBoKQBsZ1SZQDuFNaNbNiNeKQDmZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1SZQDtFNaNbNiNeMKQDmZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTaTaTaZQtFKaKaZ1SZQCmFNaNbNiNeKQCeZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTkTkTkZQtFKkKkZ1SZQCmFNaNbNiNeKQCeZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTwTwTwZQtFKwKwZ1SZQCmFNaNbNiNeKQCeZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTxaTaTxaZQvFKaKxaZ1SZQCpFNaNbNiNeKQChZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTSQBwQBuQBo__T10emplaceRefTyaTaThZQuFKaKhZ1SZQCnFNaNbNiNeKQCfZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTaZQxFNaNbNiNeKaZv@Base 12
+ _D4core8internal8lifetime__T18emplaceInitializerTkZQxFNaNbNiNeKkZv@Base 12
+ _D4core8internal8postblit11__moduleRefZ@Base 12
+ _D4core8internal8postblit12__ModuleInfoZ@Base 12
+ _D4core8internal8spinlock11__moduleRefZ@Base 12
+ _D4core8internal8spinlock12__ModuleInfoZ@Base 12
+ _D4core8internal8spinlock15AlignedSpinLock6__ctorMOFNbNcNiNeEQChQCfQBz8SpinLock10ContentionZOSQDoQDmQDgQDa@Base 12
+ _D4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D4core8internal8spinlock8SpinLock4lockMOFNbNiNeZv@Base 12
+ _D4core8internal8spinlock8SpinLock5yieldMOFNbNiNemZv@Base 12
+ _D4core8internal8spinlock8SpinLock6__ctorMOFNbNcNiNeEQBzQBxQBrQBl10ContentionZOSQDaQCyQCsQCm@Base 12
+ _D4core8internal8spinlock8SpinLock6__initZ@Base 12
+ _D4core8internal8spinlock8SpinLock6unlockMOFNbNiNeZv@Base 12
+ _D4core8internal9container5array11__moduleRefZ@Base 12
+ _D4core8internal9container5array12__ModuleInfoZ@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk4backMNgFNaNbNcNdNiZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk4swapMFNaNbNiNfKSQCkQCiQCcQBv__TQBsTQBpZQCaZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5frontMNgFNaNbNcNdNiNfZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opIndexMNgFNaNbNcNimZNgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opSliceMNgFNaNbNiZANgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7opSliceMNgFNaNbNimmZANgAv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk8opAssignMFNbNcNiNjSQCnQClQCfQBy__TQBvTQBsZQCdZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTAvZQk__T10insertBackZQnMFNbNiQBdZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf4backMNgFNaNbNcNdNiZNgPSQBxQBwQBqQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf4swapMFNaNbNiNfKSQDgQDeQCyQCr__TQCoTQClZQCwZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5frontMNgFNaNbNcNdNiNfZNgPSQCaQBzQBtQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opIndexMNgFNaNbNcNimZNgPSQBzQByQBsQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opSliceMNgFNaNbNiZANgPSQBxQBwQBqQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7opSliceMNgFNaNbNimmZANgPSQBzQByQBsQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf8opAssignMFNbNcNiNjSQDjQDhQDbQCu__TQCrTQCoZQCzZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf__T10insertBackZQnMFNbNiKQCaZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu4backMNgFNaNbNcNdNiZNgPSQFcQFaQEuQDm__TQDhTQDcTQDcZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu4swapMFNaNbNiNfKSQEvQEtQEnQEg__TQEdTQEaZQElZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5frontMNgFNaNbNcNdNiNfZNgPSQFfQFdQExQDp__TQDkTQDfTQDfZQDwQCo@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opIndexMNgFNaNbNcNimZNgPSQFeQFcQEwQDo__TQDjTQDeTQDeZQDvQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opSliceMNgFNaNbNiZANgPSQFcQFaQEuQDm__TQDhTQDcTQDcZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7opSliceMNgFNaNbNimmZANgPSQFeQFcQEwQDo__TQDjTQDeTQDeZQDvQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCu8opAssignMFNbNcNiNjSQEyQEwQEqQEj__TQEgTQEdZQEoZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw4backMNgFNaNbNcNdNiZNgPSQFeQFcQEwQDo__TQDjTQDeTQDfZQDvQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw4swapMFNaNbNiNfKSQExQEvQEpQEi__TQEfTQEcZQEnZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5frontMNgFNaNbNcNdNiNfZNgPSQFhQFfQEzQDr__TQDmTQDhTQDiZQDyQCo@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opIndexMNgFNaNbNcNimZNgPSQFgQFeQEyQDq__TQDlTQDgTQDhZQDxQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opSliceMNgFNaNbNiZANgPSQFeQFcQEwQDo__TQDjTQDeTQDfZQDvQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7opSliceMNgFNaNbNimmZANgPSQFgQFeQEyQDq__TQDlTQDgTQDhZQDxQCn@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCw8opAssignMFNbNcNiNjSQFaQEyQEsQEl__TQEiTQEfZQEqZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu4backMNgFNaNbNcNdNiZNgPSQFcQFaQEuQDm__TQDhTQDcTiZQDrQCj@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu4swapMFNaNbNiNfKSQEvQEtQEnQEg__TQEdTQEaZQElZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5frontMNgFNaNbNcNdNiNfZNgPSQFfQFdQExQDp__TQDkTQDfTiZQDuQCm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opIndexMNgFNaNbNcNimZNgPSQFeQFcQEwQDo__TQDjTQDeTiZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opSliceMNgFNaNbNiZANgPSQFcQFaQEuQDm__TQDhTQDcTiZQDrQCj@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7opSliceMNgFNaNbNimmZANgPSQFeQFcQEwQDo__TQDjTQDeTiZQDtQCl@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTPSQBqQBoQBi7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCu8opAssignMFNbNcNiNjSQEyQEwQEqQEj__TQEgTQEdZQEoZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk4backMNgFNaNbNcNdNiZNgSQCcQCbQBvQBu@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk4swapMFNaNbNiNfKSQDlQDjQDdQCw__TQCtTQCqZQDbZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5frontMNgFNaNbNcNdNiNfZNgSQCfQCeQByQBx@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opIndexMNgFNaNbNcNimZNgSQCeQCdQBxQBw@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opSliceMNgFNaNbNiZANgSQCcQCbQBvQBu@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7opSliceMNgFNaNbNimmZANgSQCeQCdQBxQBw@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCwTQCtZQDeZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk__T10insertBackZQnMFNbNiQCeZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh4backMNgFNaNbNcNdNiZNgSQDoQBzQBzQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh4swapMFNaNbNiNfKSQDiQDgQDaQCt__TQCqTQCnZQCyZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5frontMNgFNaNbNcNdNiNfZNgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opIndexMNgFNaNbNcNimZNgSQDqQCbQCbQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opSliceMNgFNaNbNiZANgSQDoQBzQBzQBp@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7opSliceMNgFNaNbNimmZANgSQDqQCbQCbQBr@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh8opAssignMFNbNcNiNjSQDlQDjQDdQCw__TQCtTQCqZQDbZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh__T10insertBackZQnMFNbNiQCbZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi11__invariantMxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi12__invariant0MxFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi4backMNgFNaNbNcNdNiZNgSQDpQCaQCaQBq@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi4swapMFNaNbNiNfKSQDjQDhQDbQCu__TQCrTQCoZQCzZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5frontMNgFNaNbNcNdNiNfZNgSQDsQCdQCdQBt@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6lengthMFNbNdNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6removeMFNbNimZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opIndexMNgFNaNbNcNimZNgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opSliceMNgFNaNbNiZANgSQDpQCaQCaQBq@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7opSliceMNgFNaNbNimmZANgSQDrQCcQCcQBs@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi7popBackMFNbNiZv@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi8opAssignMFNbNcNiNjSQDmQDkQDeQCx__TQCuTQCrZQDcZQBc@Base 12
+ _D4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi__T10insertBackZQnMFNbNiQCcZv@Base 12
+ _D4core8internal9container5treap11__moduleRefZ@Base 12
+ _D4core8internal9container5treap12__ModuleInfoZ@Base 12
+ _D4core8internal9container5treap4Rand5frontMFNaNbNdNiNfZk@Base 12
+ _D4core8internal9container5treap4Rand6__initZ@Base 12
+ _D4core8internal9container5treap4Rand6opCallMFNaNbNiNfZk@Base 12
+ _D4core8internal9container5treap4Rand8popFrontMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh10initializeMFNaNbNiNfmZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh13opApplyHelperFNbxPSQDmQDkQDeQCx__TQCuTQCrZQDc4NodeMDFNbKxSQEzQDkQDkQDaZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh4Node6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6insertMFNbNiPSQDgQDeQCyQCr__TQCoTQClZQCw4NodeQCxZQBl@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6insertMFNbNiQBqZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6removeFNbNiPPSQDgQDeQCyQCr__TQCoTQClZQCw4NodeQCxZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6removeMFNbNiQBqZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7opApplyMFNbMDFNbKQBvZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7opApplyMxFNbMDFNbKxSQDmQBxQBxQBnZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7rotateLFNaNbNiNfPSQDkQDiQDcQCv__TQCsTQCpZQDa4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh7rotateRFNaNbNiNfPSQDkQDiQDcQCv__TQCsTQCpZQDa4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh8freeNodeFNbNiPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh8opAssignMFNbNcNiNjSQDlQDjQDdQCw__TQCtTQCqZQDbZQBc@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9allocNodeMFNbNiQBtZPSQDnQDlQDfQCy__TQCvTQCsZQDd4Node@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9removeAllFNbNiPSQDiQDgQDaQCt__TQCqTQCnZQCy4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh9removeAllMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi10initializeMFNaNbNiNfmZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi13opApplyHelperFNbxPSQDnQDlQDfQCy__TQCvTQCsZQDd4NodeMDFNbKxSQFaQDlQDlQDbZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node11__xopEqualsMxFKxSQDqQDoQDiQDb__TQCyTQCvZQDgQByZb@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node9__xtoHashFNbNeKxSQDpQDnQDhQDa__TQCxTQCuZQDfQBxZm@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6insertMFNbNiPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeQCyZQBl@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6insertMFNbNiQBrZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6removeFNbNiPPSQDhQDfQCzQCs__TQCpTQCmZQCx4NodeQCyZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6removeMFNbNiQBrZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7opApplyMFNbMDFNbKQBwZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7opApplyMxFNbMDFNbKxSQDnQByQByQBoZiZi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7rotateLFNaNbNiNfPSQDlQDjQDdQCw__TQCtTQCqZQDb4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi7rotateRFNaNbNiNfPSQDlQDjQDdQCw__TQCtTQCqZQDb4NodeZQBi@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi8freeNodeFNbNiPSQDiQDgQDaQCt__TQCqTQCnZQCy4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi8opAssignMFNbNcNiNjSQDmQDkQDeQCx__TQCuTQCrZQDcZQBc@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9allocNodeMFNbNiQBuZPSQDoQDmQDgQCz__TQCwTQCtZQDe4Node@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9removeAllFNbNiPSQDjQDhQDbQCu__TQCrTQCoZQCz4NodeZv@Base 12
+ _D4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi9removeAllMFNbNiZv@Base 12
+ _D4core8internal9container6common11__moduleRefZ@Base 12
+ _D4core8internal9container6common12__ModuleInfoZ@Base 12
+ _D4core8internal9container6common7xmallocFNbNimZPv@Base 12
+ _D4core8internal9container6common8xreallocFNbNiPvmZQe@Base 12
+ _D4core8internal9container6common__T10initializeTAvZQqFNaNbNiNfKQpZv@Base 12
+ _D4core8internal9container6common__T10initializeTPS3gcc8sections3elf3DSOZQBlFNaNbNiNfKQBlZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDaFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDcFNaNbNiNfKQDcZv@Base 12
+ _D4core8internal9container6common__T10initializeTPSQBxQBvQBp7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQDaFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T10initializeTS3gcc8sections3elf9ThreadDSOZQBqFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBw2gc11gcinterface4RootZQBnFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBw2gc11gcinterface5RangeZQBoFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCzFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDbFNaNbNiNfKQDbZv@Base 12
+ _D4core8internal9container6common__T10initializeTSQBwQBuQBo7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCzFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T7destroyTAvZQmFNaNbNiNfKQpZv@Base 12
+ _D4core8internal9container6common__T7destroyTPS3gcc8sections3elf3DSOZQBhFNaNbNiNfKQBlZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCwFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCyFNaNbNiNfKQDcZv@Base 12
+ _D4core8internal9container6common__T7destroyTPSQBtQBrQBl7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCwFNaNbNiNfKQDaZv@Base 12
+ _D4core8internal9container6common__T7destroyTS3gcc8sections3elf9ThreadDSOZQBmFNaNbNiNfKQBqZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBs2gc11gcinterface4RootZQBjFNaNbNiNfKQBnZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBs2gc11gcinterface5RangeZQBkFNaNbNiNfKQBoZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQCvFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQCxFNaNbNiNfKQDbZv@Base 12
+ _D4core8internal9container6common__T7destroyTSQBsQBqQBk7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4NodeZQCvFNaNbNiNfKQCzZv@Base 12
+ _D4core8internal9container7hashtab11__moduleRefZ@Base 12
+ _D4core8internal9container7hashtab12__ModuleInfoZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi13opIndexAssignMFNbNiQBtQCaZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi3getMFNbNiQBmZPQBn@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node11__xopEqualsMxFKxSQDsQDqQDkQDd__TQCyTQCtTQCtZQDkQCcZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node9__xtoHashFNbNeKxSQDrQDpQDjQDc__TQCxTQCsTQCsZQDjQCbZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6hashOfFNaNbNiNeMKxAaZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6removeMFNbNiIAaZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi7opApplyMFMDFKQBqKQBqZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi7opIndexMNgFNaNbNcNiQBwZNgSQByQByQBr@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCuTQCpTQCpZQDgZQBg@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxAaZPNgSQCxQCxQCq@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk13opIndexAssignMFNbNiQBwQCcZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk3getMFNbNiQBoZPQBq@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6hashOfFNaNbNiNeMKxPvZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6removeMFNbNiIPvZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk7opApplyMFMDFKQBsKQBtZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk7opIndexMNgFNaNbNcNiQByZNgPSQCbQCaQBuQBt@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk8opAssignMFNbNcNiNjSQDqQDoQDiQDb__TQCwTQCrTQCsZQDiZQBg@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxPvZPNgPSQDaQCzQCtQCs@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi10__aggrDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi11__fieldDtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi13opIndexAssignMFNbNiiQByZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi14__aggrPostblitMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi18ensureNotInOpApplyMFNaNbNiNfZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi3getMFNbNiQBmZPi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4Node6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4growMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4maskMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi5emptyMxFNaNbNdNiNfZb@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi5resetMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__dtorMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__initZ@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6hashOfFNaNbNiNeMKxPyQBvZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6lengthMxFNaNbNdNiNfZm@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6removeMFNbNiIPyQBqZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6shrinkMFNbNiZv@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi7opApplyMFMDFKQBqKiZiZi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi7opIndexMNgFNaNbNcNiQBwZNgi@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi8opAssignMFNbNcNiNjSQDoQDmQDgQCz__TQCuTQCpTiZQDeZQBe@Base 12
+ _D4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi__T13opBinaryRightVAyaa2_696eZQBbMNgFNaNbNiMxPyQCvZPNgi@Base 12
+ _D4core8lifetime11__moduleRefZ@Base 12
+ _D4core8lifetime12__ModuleInfoZ@Base 12
+ _D4core8lifetime__T11copyEmplaceTS3std11concurrency3TidTQxZQBoFNaNbNiKQBlKQBpZv@Base 12
+ _D4core8lifetime__T11moveEmplaceTAyaZQsFNaNbNiKQoKQrZv@Base 12
+ _D4core8lifetime__T11moveEmplaceTC3std3zip13ArchiveMemberZQBnFNaNbNiKQBkKQBoZv@Base 12
+ _D4core8lifetime__T11moveEmplaceTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCnFNaNbNiKQCkKQCoZv@Base 12
+ _D4core8lifetime__T11moveEmplaceTS3std8datetime8timezone13PosixTimeZone14TempTransitionZQCrFNaNbNiKQCoKQCsZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTAvZQvFNaNbNiNfMKQqNkMKQwZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTAyaZQwFNaNbNiNfMKQrNkMKQxZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTC3std3zip13ArchiveMemberZQBrFNaNbNiNfMKQBnNkMKQBuZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTE3std12experimental6loggerQCj8LogLevelZQCfFNaNbNiNfKQCaKQCeZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTS3std5regex8internal2ir__T5InputTaZQjZQCeFNaNbNiNfMKQCaNkMKQChZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTS3std5regex8internal2ir__T5RegexTaZQjZQCeFNaNbNiNfMKQCaNkMKQChZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTS3std8bitmanip__T7BitsSetTmZQlZQBxFNaNbNiNfKQBsKQBwZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCrFNaNbNiNfKQCmKQCqZv@Base 12
+ _D4core8lifetime__T15moveEmplaceImplTS3std8datetime8timezone13PosixTimeZone14TempTransitionZQCvFNaNbNiNfMKQCrNkMKQCyZv@Base 12
+ _D4core8lifetime__T15trustedMoveImplTAvZQvFNaNbNiNeNkMKQsZQv@Base 12
+ _D4core8lifetime__T15trustedMoveImplTAyaZQwFNaNbNiNeNkMKQtZQw@Base 12
+ _D4core8lifetime__T15trustedMoveImplTC3std3zip13ArchiveMemberZQBrFNaNbNiNeNkMKQBpZQBt@Base 12
+ _D4core8lifetime__T15trustedMoveImplTE3std12experimental6loggerQCj8LogLevelZQCfFNaNbNiNeKQCaZQCe@Base 12
+ _D4core8lifetime__T15trustedMoveImplTS3std5regex8internal2ir__T5InputTaZQjZQCeFNaNbNiNeNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T15trustedMoveImplTS3std5regex8internal2ir__T5RegexTaZQjZQCeFNaNbNiNeNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T15trustedMoveImplTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCrFNaNbNiNeKQCmZQCq@Base 12
+ _D4core8lifetime__T15trustedMoveImplTS3std8datetime8timezone13PosixTimeZone14TempTransitionZQCvFNaNbNiNeNkMKQCtZQCx@Base 12
+ _D4core8lifetime__T4moveTAvZQjFNaNbNiNfNkMKQsZQv@Base 12
+ _D4core8lifetime__T4moveTAyaZQkFNaNbNiNfNkMKQtZQw@Base 12
+ _D4core8lifetime__T4moveTC3std3zip13ArchiveMemberZQBfFNaNbNiNfNkMKQBpZQBt@Base 12
+ _D4core8lifetime__T4moveTE3std12experimental6loggerQBx8LogLevelZQBtFNaNbNiNfKQCaZQCe@Base 12
+ _D4core8lifetime__T4moveTS3std5regex8internal2ir__T5InputTaZQjZQBsFNaNbNiNfNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T4moveTS3std5regex8internal2ir__T5RegexTaZQjZQBsFNaNbNiNfNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T4moveTS3std8bitmanip__T7BitsSetTmZQlZQBlFNaNbNiNfKQBsKQBwZv@Base 12
+ _D4core8lifetime__T4moveTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCfFNaNbNiNfKQCmZQCq@Base 12
+ _D4core8lifetime__T4moveTS3std8datetime8timezone13PosixTimeZone14TempTransitionZQCjFNaNbNiNfNkMKQCtZQCx@Base 12
+ _D4core8lifetime__T7emplaceTAyaTQeZQqFNaNbNiNfPQtKQwZQh@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental6logger10filelogger10FileLoggerTSQBz5stdio4FileTEQCpQCoQCdQDz8LogLevelZQDvFAvKQBuQBhZQEa@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental6logger10filelogger10FileLoggerTSQBz5stdio4FileTEQCpQCoQCdQDz8LogLevelZQDvFNfQDsKQBxQBkZQEd@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental6loggerQCa16StdForwardLoggerTEQBwQBvQBkQDg8LogLevelZQDcFAvQBdZQDd@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental6loggerQCa16StdForwardLoggerTEQBwQBvQBkQDg8LogLevelZQDcFNfQCzQBgZQDg@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocatorTOSQDcQDbQCq18RCISharedAllocatorZQEqFNaNbNiAvKOQBtZQEz@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocatorTOSQDcQDbQCq18RCISharedAllocatorZQEqFNaNbNiNfQEtKOQBwZQFc@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEkZQGdFNaNbNiAvZQGh@Base 12
+ _D4core8lifetime__T7emplaceTC3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEkZQGdFNaNbNiNfQGgZQGk@Base 12
+ _D4core8lifetime__T7emplaceTC3std3zip13ArchiveMemberTQzZQBlFNaNbNiNfPQBpKQBtZQj@Base 12
+ _D4core8lifetime__T7emplaceTC3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCaTxSQDqQDpQDmQBh__T5RegexTaZQjTQCjTAvZQFiFNaNfQFhKxQBwQDgQwZQFv@Base 12
+ _D4core8lifetime__T7emplaceTC3std5regex8internal12backtracking__T19BacktrackingMatcherTaTSQCjQCiQCf2ir__T5InputTaZQjZQCaTxSQDqQDpQDmQBh__T5RegexTaZQjTQCjTAvZQFiFNaQjKxQBtQDdQtZQFs@Base 12
+ _D4core8lifetime__T7emplaceTC3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBwTxSQDhQDgQDdQBh__T5RegexTaZQjTQCjTAvZQEzFNaNfQEyKxQBwQDgQwZQFm@Base 12
+ _D4core8lifetime__T7emplaceTC3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBwTxSQDhQDgQDdQBh__T5RegexTaZQjTQCjTAvZQEzFNaQjKxQBtQDdQtZQFj@Base 12
+ _D4core8lifetime__T7emplaceTCQBb4sync5mutex5MutexZQBfFNbNiAvZQBh@Base 12
+ _D4core8lifetime__T7emplaceTCQBb4sync5mutex5MutexZQBfFNbNiNfQBgZQBk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb8internal2gc4impl12conservativeQw14ConservativeGCZQClFQCgZQCk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb8internal2gc4impl6manualQp8ManualGCZQBxFQBsZQBw@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception10RangeErrorTAyaTmTnZQBsFNaNbNiNfQBvKQyKmKQxZQCh@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception11AssertErrorTAyaTQeTmZQBuFNaNbNiNfQBxKQzKQBcKmZQCk@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception11AssertErrorTAyaTmZQBrFNaNbNiNfQBuKQwKmZQCd@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCxFNaNbNiNfQDaKQCaKQBtKQBeKmZQDs@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception15ArrayIndexErrorTmTmTAyaTmTnZQCbFNaNbNiNfQCeKmKmKQBcKmKQBcZQCw@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception15ArraySliceErrorTmTmTmTAyaTmTnZQCdFNaNbNiNfQCgKmKmKmKQBeKmKQBeZQDa@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception16OutOfMemoryErrorTbZQBsFNaNbNiNfQBvKbZQCb@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception16OutOfMemoryErrorZQBqFNaNbNiNfQBtZQBx@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception27InvalidMemoryOperationErrorZQCbFNaNbNiNfQCeZQCi@Base 12
+ _D4core8lifetime__T7emplaceTCQBb9exception9ForkErrorTAyaTmTnZQBqFNaNbNiNfQBtKQyKmKQxZQCf@Base 12
+ _D4core8lifetime__T7emplaceTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5ValueTQEyZQFlFNaNbNiNfPQFpKQFtZQj@Base 12
+ _D4core8lifetime__T7emplaceTS3std10functional__T7memoizeS_DQBe5regex__T9regexImplTAyaZQpFNfxAyaAxaZSQCtQBp8internal2ir__T5RegexTaZQjVii8ZQDlFxQByQByZ5ValueTxQCnTQCoTQCoZQFuFNaNbNiNfPQFyKxQDrKQDsQDrZQr@Base 12
+ _D4core8lifetime__T7emplaceTS3std11concurrency__T4ListTSQBbQBa7MessageZQw4NodeTQyZQClFPQChKQBkZQj@Base 12
+ _D4core8lifetime__T7emplaceTS3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqZQJrFNaNbNiNfPQJvZQf@Base 12
+ _D4core8lifetime__T7emplaceTS3std3net4curl3FTP4ImplZQBhFNaNbNiNfPQBlZQf@Base 12
+ _D4core8lifetime__T7emplaceTS3std3net4curl4HTTP4ImplZQBiFNaNbNiNfPQBmZQf@Base 12
+ _D4core8lifetime__T7emplaceTS3std3net4curl4SMTP4ImplZQBiFNaNbNiNfPQBmZQf@Base 12
+ _D4core8lifetime__T7emplaceTS3std4file15DirIteratorImplTAyaTEQBgQBf8SpanModeTbZQCiFNfPQCgKQBiKQBiKbZQp@Base 12
+ _D4core8lifetime__T7emplaceTS3std6socket11AddressInfoTQBaZQBnFNaNbNiNfPQBrKQBvZQj@Base 12
+ _D4core8lifetime__T7emplaceTaTaZQnFNaNbNiNfPaKaZQf@Base 12
+ _D4core8lifetime__T7emplaceTwTwZQnFNaNbNiNfPwKwZQf@Base 12
+ _D4core8lifetime__T8moveImplTAvZQnFNaNbNiNfNkMKQsZQv@Base 12
+ _D4core8lifetime__T8moveImplTAyaZQoFNaNbNiNfNkMKQtZQw@Base 12
+ _D4core8lifetime__T8moveImplTC3std3zip13ArchiveMemberZQBjFNaNbNiNfNkMKQBpZQBt@Base 12
+ _D4core8lifetime__T8moveImplTE3std12experimental6loggerQCb8LogLevelZQBxFNaNbNiNfKQCaZQCe@Base 12
+ _D4core8lifetime__T8moveImplTS3std5regex8internal2ir__T5InputTaZQjZQBwFNaNbNiNfNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T8moveImplTS3std5regex8internal2ir__T5RegexTaZQjZQBwFNaNbNiNfNkMKQCcZQCg@Base 12
+ _D4core8lifetime__T8moveImplTS3std8bitmanip__T7BitsSetTmZQlZQBpFNaNbNiNfKQBsKQBwZv@Base 12
+ _D4core8lifetime__T8moveImplTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCjFNaNbNiNfKQCmZQCq@Base 12
+ _D4core8lifetime__T8moveImplTS3std8datetime8timezone13PosixTimeZone14TempTransitionZQCnFNaNbNiNfNkMKQCtZQCx@Base 12
+ _D4core8volatile11__moduleRefZ@Base 12
+ _D4core8volatile12__ModuleInfoZ@Base 12
+ _D4core9attribute11__moduleRefZ@Base 12
+ _D4core9attribute12__ModuleInfoZ@Base 12
+ _D4core9attribute9gnuAbiTag11__xopEqualsMxFKxSQBsQBqQBjZb@Base 12
+ _D4core9attribute9gnuAbiTag6__ctorMFNcAAyaXSQBqQBoQBh@Base 12
+ _D4core9attribute9gnuAbiTag6__initZ@Base 12
+ _D4core9attribute9gnuAbiTag9__xtoHashFNbNeKxSQBrQBpQBiZm@Base 12
+ _D4core9exception10RangeError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCsQCqQCj@Base 12
+ _D4core9exception10RangeError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCqQCoQCh@Base 12
+ _D4core9exception10RangeError6__initZ@Base 12
+ _D4core9exception10RangeError6__vtblZ@Base 12
+ _D4core9exception10RangeError7__ClassZ@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfAyaQdmC6object9ThrowableZCQCtQCrQCk@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfAyamZCQBzQBxQBq@Base 12
+ _D4core9exception11AssertError6__ctorMFNaNbNiNfC6object9ThrowableAyamZCQCrQCpQCi@Base 12
+ _D4core9exception11AssertError6__initZ@Base 12
+ _D4core9exception11AssertError6__vtblZ@Base 12
+ _D4core9exception11AssertError7__ClassZ@Base 12
+ _D4core9exception11SwitchError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCrQCpQCi@Base 12
+ _D4core9exception11SwitchError6__initZ@Base 12
+ _D4core9exception11SwitchError6__vtblZ@Base 12
+ _D4core9exception11SwitchError7__ClassZ@Base 12
+ _D4core9exception11__moduleRefZ@Base 12
+ _D4core9exception11rangeMsgPutFNaNbNiNfKAaMAxaZv@Base 12
+ _D4core9exception12__ModuleInfoZ@Base 12
+ _D4core9exception13FinalizeError6__ctorMFNaNbNiNfC8TypeInfoAyamC6object9ThrowableZCQDdQDbQCu@Base 12
+ _D4core9exception13FinalizeError6__ctorMFNaNbNiNfC8TypeInfoC6object9ThrowableAyamZCQDdQDbQCu@Base 12
+ _D4core9exception13FinalizeError6__initZ@Base 12
+ _D4core9exception13FinalizeError6__vtblZ@Base 12
+ _D4core9exception13FinalizeError7__ClassZ@Base 12
+ _D4core9exception13FinalizeError8toStringMxFNfZAya@Base 12
+ _D4core9exception13assertHandlerFNbNdNiNePFNbAyamQeZvZv@Base 12
+ _D4core9exception13assertHandlerFNbNdNiNeZPFNbAyamQeZv@Base 12
+ _D4core9exception14_assertHandlerPFNbAyamQeZv@Base 12
+ _D4core9exception15ArrayIndexError6__ctorMFNaNbNiNfmmAyamC6object9ThrowableZCQCxQCvQCo@Base 12
+ _D4core9exception15ArrayIndexError6__initZ@Base 12
+ _D4core9exception15ArrayIndexError6__vtblZ@Base 12
+ _D4core9exception15ArrayIndexError7__ClassZ@Base 12
+ _D4core9exception15ArraySliceError6__ctorMFNaNbNiNfmmmAyamC6object9ThrowableZCQCyQCwQCp@Base 12
+ _D4core9exception15ArraySliceError6__initZ@Base 12
+ _D4core9exception15ArraySliceError6__vtblZ@Base 12
+ _D4core9exception15ArraySliceError7__ClassZ@Base 12
+ _D4core9exception16OutOfMemoryError13superToStringMFNeZAya@Base 12
+ _D4core9exception16OutOfMemoryError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCwQCuQCn@Base 12
+ _D4core9exception16OutOfMemoryError6__ctorMFNaNbNiNfbAyamC6object9ThrowableZCQCxQCvQCo@Base 12
+ _D4core9exception16OutOfMemoryError6__initZ@Base 12
+ _D4core9exception16OutOfMemoryError6__vtblZ@Base 12
+ _D4core9exception16OutOfMemoryError7__ClassZ@Base 12
+ _D4core9exception16OutOfMemoryError8toStringMxFNeZAya@Base 12
+ _D4core9exception16UnicodeException6__ctorMFNaNbNiNfAyamQemC6object9ThrowableZCQCzQCxQCq@Base 12
+ _D4core9exception16UnicodeException6__initZ@Base 12
+ _D4core9exception16UnicodeException6__vtblZ@Base 12
+ _D4core9exception16UnicodeException7__ClassZ@Base 12
+ _D4core9exception17SuppressTraceInfo6__initZ@Base 12
+ _D4core9exception17SuppressTraceInfo6__vtblZ@Base 12
+ _D4core9exception17SuppressTraceInfo7__ClassZ@Base 12
+ _D4core9exception17SuppressTraceInfo7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _D4core9exception17SuppressTraceInfo7opApplyMxFMDFKxAaZiZi@Base 12
+ _D4core9exception17SuppressTraceInfo8instanceFNaNbNiNeZ2ityCQCgQCeQBx@Base 12
+ _D4core9exception17SuppressTraceInfo8instanceFNaNbNiNeZCQCcQCaQBt@Base 12
+ _D4core9exception17SuppressTraceInfo8toStringMxFZAya@Base 12
+ _D4core9exception27InvalidMemoryOperationError13superToStringMFNeZAya@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQDhQDfQCy@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__initZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError6__vtblZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError7__ClassZ@Base 12
+ _D4core9exception27InvalidMemoryOperationError8toStringMxFNeZAya@Base 12
+ _D4core9exception6_storeG256v@Base 12
+ _D4core9exception9ForkError6__ctorMFNaNbNiNfAyamC6object9ThrowableZCQCoQCmQCf@Base 12
+ _D4core9exception9ForkError6__initZ@Base 12
+ _D4core9exception9ForkError6__vtblZ@Base 12
+ _D4core9exception9ForkError7__ClassZ@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf10RangeErrorTAyaTmTnZQBqFKQnKmQlZ3getFNbNiZQBy@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf10RangeErrorTAyaTmTnZQBqFNaNbNiKQtKmQrZQBu@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTQeTmZQBsFKQoKQrKmZ3getFNbNiZQCb@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTQeTmZQBsFNaNbNiKQuKQxKmZQBx@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTmZQBpFKQlKmZ3getFNbNiZQBv@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf11AssertErrorTAyaTmZQBpFNaNbNiKQrKmZQBr@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCvFKQBpKQBiKQtKmZ3getFNbNiZQDj@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf13FinalizeErrorTC8TypeInfoTC6object9ThrowableTAyaTmZQCvFNaNbNiKQBvKQBoKQzKmZQDf@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArrayIndexErrorTmTmTAyaTmTnZQBzFKmKmKQrKmQpZ3getFNbNiZQCl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArrayIndexErrorTmTmTAyaTmTnZQBzFNaNbNiKmKmKQxKmQvZQCh@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArraySliceErrorTmTmTmTAyaTmTnZQCbFKmKmKmKQtKmQrZ3getFNbNiZQCp@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf15ArraySliceErrorTmTmTmTAyaTmTnZQCbFNaNbNiKmKmKmKQzKmQxZQCl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorTbZQBqFNaNbNibZQBo@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorTbZQBqFbZ3getFNbNiZQBs@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorZQBoFNaNbNiZQBl@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf16OutOfMemoryErrorZQBoFZ3getFNbNiZQBp@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf27InvalidMemoryOperationErrorZQBzFNaNbNiZQBw@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf27InvalidMemoryOperationErrorZQBzFZ3getFNbNiZQCa@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf9ForkErrorTAyaTmTnZQBoFKQnKmQlZ3getFNbNiZQBw@Base 12
+ _D4core9exception__T11staticErrorTCQBhQBf9ForkErrorTAyaTmTnZQBoFNaNbNiKQtKmQrZQBs@Base 12
+ _D50TypeInfo_C3std12experimental9allocator10IAllocator6__initZ@Base 12
+ _D50TypeInfo_E3std6format8internal5write12RoundingMode6__initZ@Base 12
+ _D50TypeInfo_E3std8internal4test10dummyrange9RangeType6__initZ@Base 12
+ _D50TypeInfo_E3std9algorithm9iteration14GroupingOpType6__initZ@Base 12
+ _D50TypeInfo_E4core3sys5linux10perf_event11perf_sw_ids6__initZ@Base 12
+ _D50TypeInfo_S3std4math8hardware20FloatingPointControl6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc4Addr6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linux8io_uring14io_uring_probe6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5linuxQk7inotify13inotify_event6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix5spawn17posix_spawnattr_t6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix7netinet3in_11sockaddr_in6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posix7pthread15pthread_cleanup6__initZ@Base 12
+ _D50TypeInfo_S4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D50TypeInfo_S4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D50TypeInfo_xE3std12experimental6logger4core8LogLevel6__initZ@Base 12
+ _D50TypeInfo_yS3std8internal14unicode_tables9CompEntry6__initZ@Base 12
+ _D51TypeInfo_AyS3std8internal14unicode_tables9CompEntry6__initZ@Base 12
+ _D51TypeInfo_E3std6format8internal5write13PrecisionType6__initZ@Base 12
+ _D51TypeInfo_E3std6format8internal5write13RoundingClass6__initZ@Base 12
+ _D51TypeInfo_E4core3sys5linux10perf_event12perf_type_id6__initZ@Base 12
+ _D51TypeInfo_E4core4sync7rwmutex14ReadWriteMutex6Policy6__initZ@Base 12
+ _D51TypeInfo_OS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D51TypeInfo_S3std8internal4test3uda17HasPrivateMembers6__initZ@Base 12
+ _D51TypeInfo_S4core3sys5linux8io_uring15io_uring_params6__initZ@Base 12
+ _D51TypeInfo_S4core3sys5posix7netinet3in_12sockaddr_in66__initZ@Base 12
+ _D51TypeInfo_S4core3sys5posixQk5types16pthread_rwlock_t6__initZ@Base 12
+ _D51TypeInfo_xC3std12experimental9allocator10IAllocator6__initZ@Base 12
+ _D51TypeInfo_xS4core3sys5posixQk5types15pthread_mutex_t6__initZ@Base 12
+ _D51TypeInfo_xS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D52TypeInfo_OxS4core8internal8spinlock15AlignedSpinLock6__initZ@Base 12
+ _D52TypeInfo_S3std12experimental9allocator12RCIAllocator6__initZ@Base 12
+ _D52TypeInfo_S3std3uni__T19PackedArrayViewImplThVmi8ZQBc6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5linux2fs22file_dedupe_range_info6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5linux3elf11Elf32_gptab9_gt_entry6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk5types17_pthread_fastlock6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk5types17pthread_barrier_t6__initZ@Base 12
+ _D52TypeInfo_S4core3sys5posixQk6socket16sockaddr_storage6__initZ@Base 12
+ _D52TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx6__initZ@Base 12
+ _D52TypeInfo_xAyS3std8internal14unicode_tables9CompEntry6__initZ@Base 12
+ _D53TypeInfo_E3std8typecons__T4FlagVAyaa6_756e73616665ZQz6__initZ@Base 12
+ _D53TypeInfo_E4core8internal2gc4impl12conservativeQw4Bins6__initZ@Base 12
+ _D53TypeInfo_S3std11parallelism__T4TaskSQBaQz3runTDFZvZQv6__initZ@Base 12
+ _D53TypeInfo_S3std3uni__T19PackedArrayViewImplTtVmi16ZQBd6__initZ@Base 12
+ _D53TypeInfo_S3std5range__T4TakeTSQuQs__T6RepeatTaZQkZQBb6__initZ@Base 12
+ _D53TypeInfo_S3std5regex8internal12backtracking9CtContext6__initZ@Base 12
+ _D53TypeInfo_S3std5regex8internal8thompson__T6ThreadTmZQk6__initZ@Base 12
+ _D53TypeInfo_S3std5stdio4File__T16BinaryWriterImplVbi1ZQx6__initZ@Base 12
+ _D53TypeInfo_S3std6digest3crc__T3CRCVki32Vmi3988292384ZQx6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_cqring_offsets6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_sqring_offsets6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5linux8io_uring17io_uring_probe_op6__initZ@Base 12
+ _D53TypeInfo_S4core3sys5posixQk5types18pthread_condattr_t6__initZ@Base 12
+ _D53TypeInfo_S4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D53TypeInfo_S4core8internal2gc4impl12conservativeQw4Pool6__initZ@Base 12
+ _D54TypeInfo_E3std12experimental9allocator5typed9AllocFlag6__initZ@Base 12
+ _D54TypeInfo_E4core3sys5linux10perf_event15perf_event_type6__initZ@Base 12
+ _D54TypeInfo_E4core8internal8spinlock8SpinLock10Contention6__initZ@Base 12
+ _D54TypeInfo_S3std11concurrency__T4ListTSQBbQBa7MessageZQw6__initZ@Base 12
+ _D54TypeInfo_S3std8datetime8timezone13PosixTimeZone6TTInfo6__initZ@Base 12
+ _D54TypeInfo_S3std8internal14unicode_tables13FullCaseEntry6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux10perf_event15perf_event_attr6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux3elf11Elf32_gptab10_gt_header6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linux5stdio21cookie_io_functions_t6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5linuxQk8signalfd16signalfd_siginfo6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5posix9semaphore17_pthread_fastlock6__initZ@Base 12
+ _D54TypeInfo_S4core3sys5posixQk5types19pthread_mutexattr_t6__initZ@Base 12
+ _D54TypeInfo_xS3std5regex8internal8thompson__T6ThreadTmZQk6__initZ@Base 12
+ _D54TypeInfo_xS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D55TypeInfo_E3std6format8internal5write17HasToStringResult6__initZ@Base 12
+ _D55TypeInfo_E4core3sys5linux10perf_event16perf_hw_cache_id6__initZ@Base 12
+ _D55TypeInfo_PxS3std5regex8internal8thompson__T6ThreadTmZQk6__initZ@Base 12
+ _D55TypeInfo_PxS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D55TypeInfo_S3std3uni__T8CowArrayTSQwQu13ReallocPolicyZQBf6__initZ@Base 12
+ _D55TypeInfo_S3std5regex8internal9kickstart__T7ShiftOrTaZQl6__initZ@Base 12
+ _D55TypeInfo_S3std7variant__T8VariantNVmi32ZQp11SizeChecker6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5linux4tipc13sockaddr_tipc4Addr4Name6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t6__initZ@Base 12
+ _D55TypeInfo_S4core3sys5posixQk5types20pthread_rwlockattr_t6__initZ@Base 12
+ _D55TypeInfo_S4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D55TypeInfo_xPS3std5regex8internal8thompson__T6ThreadTmZQk6__initZ@Base 12
+ _D55TypeInfo_xPS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D55TypeInfo_yS3std8datetime8timezone13PosixTimeZone6TTInfo6__initZ@Base 12
+ _D56TypeInfo_AxPS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D56TypeInfo_C3std12experimental9allocator16ISharedAllocator6__initZ@Base 12
+ _D56TypeInfo_C3std5regex8internal2ir__T14MatcherFactoryTaZQt6__initZ@Base 12
+ _D56TypeInfo_E2rt5minfo11ModuleGroup9sortCtorsMFAyaZ7OnCycle6__initZ@Base 12
+ _D56TypeInfo_E3std8typecons__T4FlagVAyaa7_646f436f756e74ZQBb6__initZ@Base 12
+ _D56TypeInfo_PyS3std8datetime8timezone13PosixTimeZone6TTInfo6__initZ@Base 12
+ _D56TypeInfo_S3std10checkedint__T7CheckedTmTSQBfQBe5AbortZQz6__initZ@Base 12
+ _D56TypeInfo_S3std12experimental6logger4core6Logger8LogEntry6__initZ@Base 12
+ _D56TypeInfo_S3std8internal14unicode_tables15SimpleCaseEntry6__initZ@Base 12
+ _D56TypeInfo_S3std8internal14unicode_tables15UnicodeProperty6__initZ@Base 12
+ _D56TypeInfo_S3std8typecons__T5TupleTEQy8encoding3BOMTAhZQBb6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_branch_entry6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_event_header6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_mem_data_src6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux10perf_event17perf_ns_link_info6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5linux8io_uring20io_uring_restriction6__initZ@Base 12
+ _D56TypeInfo_S4core3sys5posixQk5types21pthread_barrierattr_t6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTaZQq6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTuZQq6__initZ@Base 12
+ _D56TypeInfo_S4core6stdcpp11string_view__T11char_traitsTwZQq6__initZ@Base 12
+ _D56TypeInfo_S4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa6__initZ@Base 12
+ _D56TypeInfo_S4core8internal6traits23__InoutWorkaroundStruct6__initZ@Base 12
+ _D56TypeInfo_xS3std5regex8internal9kickstart__T7ShiftOrTaZQl6__initZ@Base 12
+ _D56TypeInfo_xS4core8internal9container5array__T5ArrayTAvZQk6__initZ@Base 12
+ _D57TypeInfo_APyS3std8datetime8timezone13PosixTimeZone6TTInfo6__initZ@Base 12
+ _D57TypeInfo_S3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D57TypeInfo_S4core3sys5linux8io_uring21io_uring_files_update6__initZ@Base 12
+ _D57TypeInfo_xC3std12experimental9allocator16ISharedAllocator6__initZ@Base 12
+ _D57TypeInfo_xC3std5regex8internal2ir__T14MatcherFactoryTaZQt6__initZ@Base 12
+ _D57TypeInfo_yS3std8internal14unicode_tables15UnicodeProperty6__initZ@Base 12
+ _D58TypeInfo_AS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D58TypeInfo_AyS3std8internal14unicode_tables15UnicodeProperty6__initZ@Base 12
+ _D58TypeInfo_E3std8typecons__T4FlagVAyaa8_6164617074697665ZQBd6__initZ@Base 12
+ _D58TypeInfo_E3std8typecons__T4FlagVAyaa8_636865636b446e73ZQBd6__initZ@Base 12
+ _D58TypeInfo_E3std8typecons__T4FlagVAyaa8_696e646972656374ZQBd6__initZ@Base 12
+ _D58TypeInfo_E3std8typecons__T4FlagVAyaa8_706f704669727374ZQBd6__initZ@Base 12
+ _D58TypeInfo_E4core3sys5linux10perf_event19perf_hw_cache_op_id6__initZ@Base 12
+ _D58TypeInfo_G14PxS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D58TypeInfo_OxC3std12experimental9allocator16ISharedAllocator6__initZ@Base 12
+ _D58TypeInfo_S3std12experimental9allocator18RCISharedAllocator6__initZ@Base 12
+ _D58TypeInfo_S3std5regex8internal8thompson__T10ThreadListTmZQp6__initZ@Base 12
+ _D58TypeInfo_S3std6format__T7sformatTaTxdZQoFNkMAaMAxaxdZ4Sink6__initZ@Base 12
+ _D58TypeInfo_S3std8internal7cstring__T17TempCStringBufferTaZQw6__initZ@Base 12
+ _D58TypeInfo_S3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtZQBd6__initZ@Base 12
+ _D58TypeInfo_S4core3sys5linux8io_uring22io_uring_getevents_arg6__initZ@Base 12
+ _D58TypeInfo_S4core3sys5posix7pthread23_pthread_cleanup_buffer6__initZ@Base 12
+ _D58TypeInfo_xG14PS4core8internal2gc4impl12conservativeQw4List6__initZ@Base 12
+ _D58TypeInfo_xS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D59TypeInfo_AxS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D59TypeInfo_E4core3sys5linux10perf_event20perf_event_ioc_flags6__initZ@Base 12
+ _D59TypeInfo_E4core3sys5linux10perf_event20perf_sample_regs_abi6__initZ@Base 12
+ _D59TypeInfo_OS3std12experimental9allocator18RCISharedAllocator6__initZ@Base 12
+ _D59TypeInfo_S2rt9profilegc25_sharedStaticDtor_L115_C1FZ6Result6__initZ@Base 12
+ _D59TypeInfo_S3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__initZ@Base 12
+ _D59TypeInfo_S3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl6__initZ@Base 12
+ _D59TypeInfo_S3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D59TypeInfo_S3std8datetime8timezone13PosixTimeZone10TempTTInfo6__initZ@Base 12
+ _D59TypeInfo_S3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D59TypeInfo_S4core3sys5linux10perf_event20perf_event_mmap_page6__initZ@Base 12
+ _D59TypeInfo_S4core3sys5posix5spawn26posix_spawn_file_actions_t6__initZ@Base 12
+ _D59TypeInfo_xAS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh6__initZ@Base 12
+ _D59TypeInfo_xAyS3std8internal14unicode_tables15UnicodeProperty6__initZ@Base 12
+ _D59TypeInfo_xS3std5regex8internal8thompson__T10ThreadListTmZQp6__initZ@Base 12
+ _D60TypeInfo_AS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D60TypeInfo_AS3std8datetime8timezone13PosixTimeZone10TempTTInfo6__initZ@Base 12
+ _D60TypeInfo_AS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D60TypeInfo_E3std8typecons__T4FlagVAyaa9_6175746f5374617274ZQBf6__initZ@Base 12
+ _D60TypeInfo_E3std8typecons__T4FlagVAyaa9_6c65616e5269676874ZQBf6__initZ@Base 12
+ _D60TypeInfo_E3std8typecons__T4FlagVAyaa9_6f70656e5269676874ZQBf6__initZ@Base 12
+ _D60TypeInfo_E3std8typecons__T4FlagVAyaa9_706970654f6e506f70ZQBf6__initZ@Base 12
+ _D60TypeInfo_S3std11concurrency__T4ListTSQBbQBa7MessageZQw5Range6__initZ@Base 12
+ _D60TypeInfo_S3std3uni__T16SliceOverIndexedTSQBfQBe8GraphemeZQBk6__initZ@Base 12
+ _D60TypeInfo_S3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D60TypeInfo_S3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D60TypeInfo_S3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D60TypeInfo_S3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D60TypeInfo_S3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi6__initZ@Base 12
+ _D60TypeInfo_xS3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__initZ@Base 12
+ _D60TypeInfo_xS3std3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImpl6__initZ@Base 12
+ _D60TypeInfo_xS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D60TypeInfo_xS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D60TypeInfo_yS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D60TypeInfo_yS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D61TypeInfo_AxS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D61TypeInfo_AxS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D61TypeInfo_AyS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D61TypeInfo_AyS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D61TypeInfo_E4core3sys5linux10perf_event22perf_callchain_context6__initZ@Base 12
+ _D61TypeInfo_E4core3sys5linux10perf_event22perf_event_read_format6__initZ@Base 12
+ _D61TypeInfo_E4core8internal2gc4impl12conservativeQw4Pool7ShiftBy6__initZ@Base 12
+ _D61TypeInfo_PxS3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__initZ@Base 12
+ _D61TypeInfo_S2rt5minfo11ModuleGroup12sortCtorsOldMFAAiZ8StackRec6__initZ@Base 12
+ _D61TypeInfo_S3std5array__T8AppenderTASQz6socket11AddressInfoZQBj6__initZ@Base 12
+ _D61TypeInfo_S3std5regex8internal12backtracking9CtContext7CtState6__initZ@Base 12
+ _D61TypeInfo_S3std8bitmanip__T27FloatingPointRepresentationTdZQBg6__initZ@Base 12
+ _D61TypeInfo_S3std8bitmanip__T27FloatingPointRepresentationTfZQBg6__initZ@Base 12
+ _D61TypeInfo_S3std8typecons__T5TupleTAyaTQeTQhTQkTQnTQqTQtTQwZQBg6__initZ@Base 12
+ _D61TypeInfo_S3std8typecons__T5TupleTC15TypeInfo_StructTPG32hZQBg6__initZ@Base 12
+ _D61TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t5_rt_t6__initZ@Base 12
+ _D61TypeInfo_S4core4time__T12MonoTimeImplVEQBdQBb9ClockTypei0ZQBj6__initZ@Base 12
+ _D61TypeInfo_S4core8internal6string__T17TempStringNoAllocVhi20ZQz6__initZ@Base 12
+ _D61TypeInfo_xAS3std8datetime8timezone13PosixTimeZone10LeapSecond6__initZ@Base 12
+ _D61TypeInfo_xAS3std8datetime8timezone13PosixTimeZone10Transition6__initZ@Base 12
+ _D61TypeInfo_xPS3std11concurrency__T4ListTSQBbQBa7MessageZQw4Node6__initZ@Base 12
+ _D61TypeInfo_xS3std3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D61TypeInfo_xS3std3utf__T10byCodeUnitTAxuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D61TypeInfo_xS3std3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D61TypeInfo_xS3std3utf__T10byCodeUnitTAyuZQrFQhZ14ByCodeUnitImpl6__initZ@Base 12
+ _D62TypeInfo_AS3std5regex8internal12backtracking9CtContext7CtState6__initZ@Base 12
+ _D62TypeInfo_E4core3sys5linux10perf_event23perf_branch_sample_type6__initZ@Base 12
+ _D62TypeInfo_S3std12experimental9allocator10mallocator10Mallocator6__initZ@Base 12
+ _D62TypeInfo_S3std3uni__T5StackTSQt8typecons__T5TupleTkTkTkZQnZQBm6__initZ@Base 12
+ _D62TypeInfo_S4core8internal2gc4impl12conservativeQw12LeakDetector6__initZ@Base 12
+ _D63TypeInfo_E3std8typecons__T4FlagVAyaa10_616c6c6f636174654743ZQBi6__initZ@Base 12
+ _D63TypeInfo_E3std8typecons__T4FlagVAyaa10_65786861757374697665ZQBi6__initZ@Base 12
+ _D63TypeInfo_E3std8typecons__T4FlagVAyaa10_6d756c7469626c6f636bZQBi6__initZ@Base 12
+ _D63TypeInfo_E3std8typecons__T4FlagVAyaa10_736f72744f7574707574ZQBi6__initZ@Base 12
+ _D63TypeInfo_E4core3sys5linux10perf_event24perf_event_sample_format6__initZ@Base 12
+ _D63TypeInfo_S3std11concurrency__T4ListTSQBbQBa7MessageZQw8SpinLock6__initZ@Base 12
+ _D63TypeInfo_S3std6digest3crc__T3CRCVki64VmN2882303761517117440ZQBg6__initZ@Base 12
+ _D63TypeInfo_S3std6digest3crc__T3CRCVki64VmN3932672073523589310ZQBg6__initZ@Base 12
+ _D63TypeInfo_S3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D63TypeInfo_S3std8datetime8timezone13PosixTimeZone14TransitionType6__initZ@Base 12
+ _D63TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t7_kill_t6__initZ@Base 12
+ _D64TypeInfo_AS3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D64TypeInfo_E4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7AddType6__initZ@Base 12
+ _D64TypeInfo_OS3std11concurrency__T4ListTSQBbQBa7MessageZQw8SpinLock6__initZ@Base 12
+ _D64TypeInfo_PS3std8datetime8timezone13PosixTimeZone14TransitionType6__initZ@Base 12
+ _D64TypeInfo_S3std6random__T14XorshiftEngineTkVki160Vii2ViN1ViN4ZQBl6__initZ@Base 12
+ _D64TypeInfo_S3std6random__T14XorshiftEngineTkVki192ViN2Vii1Vii4ZQBl6__initZ@Base 12
+ _D64TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t8_timer_t6__initZ@Base 12
+ _D64TypeInfo_xS3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D65TypeInfo_APS3std8datetime8timezone13PosixTimeZone14TransitionType6__initZ@Base 12
+ _D65TypeInfo_AxS3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D65TypeInfo_E3std8typecons__T4FlagVAyaa11_776974685061727469616cZQBk6__initZ@Base 12
+ _D65TypeInfo_E4core3sys5linux10perf_event26perf_hw_cache_op_result_id6__initZ@Base 12
+ _D65TypeInfo_S3std12experimental9allocator12gc_allocator11GCAllocator6__initZ@Base 12
+ _D65TypeInfo_S3std4math10operations__T23FloatingPointBitpatternTdZQBc6__initZ@Base 12
+ _D65TypeInfo_S3std4math10operations__T23FloatingPointBitpatternTeZQBc6__initZ@Base 12
+ _D65TypeInfo_S3std5array__T8AppenderTACQz3zip13ArchiveMemberZQBi4Data6__initZ@Base 12
+ _D65TypeInfo_S3std6random__T14XorshiftEngineTkVki96Vii10ViN5ViN26ZQBm6__initZ@Base 12
+ _D65TypeInfo_S3std8typecons__T5TupleTmVAyaa3_706f73TmVQpa3_6c656eZQBk6__initZ@Base 12
+ _D65TypeInfo_S4core4sync7rwmutex14ReadWriteMutex6Reader12MonitorProxy6__initZ@Base 12
+ _D65TypeInfo_S4core4sync7rwmutex14ReadWriteMutex6Writer12MonitorProxy6__initZ@Base 12
+ _D65TypeInfo_S4core8internal2gc4impl12conservativeQw15LargeObjectPool6__initZ@Base 12
+ _D65TypeInfo_S4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D65TypeInfo_S4core8internal8lifetime__T10emplaceRefTaTaTaZQtFKaKaZ1S6__initZ@Base 12
+ _D65TypeInfo_S4core8internal8lifetime__T10emplaceRefTkTkTkZQtFKkKkZ1S6__initZ@Base 12
+ _D65TypeInfo_S4core8internal8lifetime__T10emplaceRefTwTwTwZQtFKwKwZ1S6__initZ@Base 12
+ _D65TypeInfo_xAS3std8datetime8timezone13PosixTimeZone14TempTransition6__initZ@Base 12
+ _D65TypeInfo_xE4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa7AddType6__initZ@Base 12
+ _D66TypeInfo_S3std12experimental6logger11multilogger16MultiLoggerEntry6__initZ@Base 12
+ _D66TypeInfo_S3std5array__T8AppenderTASQz6socket11AddressInfoZQBj4Data6__initZ@Base 12
+ _D66TypeInfo_S3std6random__T14XorshiftEngineTkVki128Vii11ViN8ViN19ZQBn6__initZ@Base 12
+ _D66TypeInfo_S3std6random__T14XorshiftEngineTkVki32Vii13ViN17Vii15ZQBn6__initZ@Base 12
+ _D66TypeInfo_S3std6random__T14XorshiftEngineTkVki64Vii10ViN13ViN10ZQBn6__initZ@Base 12
+ _D66TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTtVii12Vii9ZQw6__initZ@Base 12
+ _D66TypeInfo_S3std8typecons__T5TupleTSQy5range__T10OnlyResultTaZQpZQBl6__initZ@Base 12
+ _D66TypeInfo_S4core8internal8lifetime__T10emplaceRefTyaTaThZQuFKaKhZ1S6__initZ@Base 12
+ _D66TypeInfo_xS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D67TypeInfo_AS3std12experimental6logger11multilogger16MultiLoggerEntry6__initZ@Base 12
+ _D67TypeInfo_E3std8typecons__T4FlagVAyaa12_437265617465466f6c646572ZQBm6__initZ@Base 12
+ _D67TypeInfo_E3std8typecons__T4FlagVAyaa12_7468726f774f6e4572726f72ZQBm6__initZ@Base 12
+ _D67TypeInfo_PxS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D67TypeInfo_S3std3uni__T10assumeSizeS_DQBaQz5low_8FNaNbNiNfkZkVmi8ZQBr6__initZ@Base 12
+ _D67TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t10_sigpoll_t6__initZ@Base 12
+ _D67TypeInfo_S4core6stdcpp11type_traits__T17integral_constantTbVbi0ZQBa6__initZ@Base 12
+ _D67TypeInfo_S4core6stdcpp11type_traits__T17integral_constantTbVbi1ZQBa6__initZ@Base 12
+ _D67TypeInfo_xPS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D68TypeInfo_AxPS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D68TypeInfo_E4core3sys5linux10perf_event29perf_branch_sample_type_shift6__initZ@Base 12
+ _D68TypeInfo_E4core8demangle__T8DemangleTSQBcQBa7NoHooksZQBa10IsDelegate6__initZ@Base 12
+ _D68TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs6__initZ@Base 12
+ _D68TypeInfo_S3std4math8rounding__T9floorImplTdZQnFNaNbNiNexdZ9floatBits6__initZ@Base 12
+ _D68TypeInfo_S3std4math8rounding__T9floorImplTeZQnFNaNbNiNexeZ9floatBits6__initZ@Base 12
+ _D68TypeInfo_S3std4math8rounding__T9floorImplTfZQnFNaNbNiNexfZ9floatBits6__initZ@Base 12
+ _D68TypeInfo_S3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread6__initZ@Base 12
+ _D68TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t11_sigchild_t6__initZ@Base 12
+ _D68TypeInfo_S4core3sys5posix6signal9siginfo_t11_sifields_t11_sigfault_t6__initZ@Base 12
+ _D68TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D68TypeInfo_S4core8internal8lifetime__T10emplaceRefTxaTaTxaZQvFKaKxaZ1S6__initZ@Base 12
+ _D69TypeInfo_AS3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThread6__initZ@Base 12
+ _D69TypeInfo_E3std8typecons__T4FlagVAyaa13_6361736553656e736974697665ZQBo6__initZ@Base 12
+ _D69TypeInfo_E3std8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBo6__initZ@Base 12
+ _D69TypeInfo_S3std12experimental9allocator10mallocator17AlignedMallocator6__initZ@Base 12
+ _D69TypeInfo_S3std12experimental9allocator14mmap_allocator13MmapAllocator6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTbVii8Vii4Vii9ZQz6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTbVii8Vii5Vii8ZQz6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTbVii8Vii6Vii7ZQz6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryThVii8Vii7Vii6ZQz6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTtVii8Vii7Vii6ZQz6__initZ@Base 12
+ _D69TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTtVii8Vii8Vii5ZQz6__initZ@Base 12
+ _D69TypeInfo_S4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D69TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi12ZQsTtZQBs6__initZ@Base 12
+ _D69TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D6Object6__initZ@Base 12
+ _D6Object6__vtblZ@Base 12
+ _D6Object7__ClassZ@Base 12
+ _D6object10ModuleInfo11xgetMembersMxFNaNbNdNiZPv@Base 12
+ _D6object10ModuleInfo12localClassesMxFNaNbNdNiNjZAC14TypeInfo_Class@Base 12
+ _D6object10ModuleInfo15importedModulesMxFNaNbNdNiNjZAyPSQCcQBy@Base 12
+ _D6object10ModuleInfo4ctorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo4dtorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo4nameMxFNaNbNdNiNjZAya@Base 12
+ _D6object10ModuleInfo5flagsMxFNaNbNdNiZk@Base 12
+ _D6object10ModuleInfo5ictorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo5indexMxFNaNbNdNiZk@Base 12
+ _D6object10ModuleInfo6__initZ@Base 12
+ _D6object10ModuleInfo6addrOfMxFNaNbNiNjiZPv@Base 12
+ _D6object10ModuleInfo7opApplyFMDFPSQBhQBdZiZi@Base 12
+ _D6object10ModuleInfo7tlsctorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo7tlsdtorMxFNaNbNdNiZPFZv@Base 12
+ _D6object10ModuleInfo8opAssignMFxSQBgQBcZv@Base 12
+ _D6object10ModuleInfo8unitTestMxFNaNbNdNiZPFZv@Base 12
+ _D6object10_xopEqualsFIPvIQdZb@Base 12
+ _D6object10getElementFNaNbNeNkMNgC8TypeInfoZNgQn@Base 12
+ _D6object11__moduleRefZ@Base 12
+ _D6object12__ModuleInfoZ@Base 12
+ _D6object12getArrayHashFNbNeMxC8TypeInfoMxPvxmZ15hasCustomToHashFNaNbNeMxQBrZb@Base 12
+ _D6object12getArrayHashFNbNeMxC8TypeInfoMxPvxmZm@Base 12
+ _D6object12setSameMutexFOC6ObjectOQjZv@Base 12
+ _D6object13TypeInfo_Enum11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object13TypeInfo_Enum4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object13TypeInfo_Enum4swapMxFPvQcZv@Base 12
+ _D6object13TypeInfo_Enum5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object13TypeInfo_Enum5offTiMxFZAxSQBj14OffsetTypeInfo@Base 12
+ _D6object13TypeInfo_Enum5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object13TypeInfo_Enum6equalsMxFIPvIQdZb@Base 12
+ _D6object13TypeInfo_Enum6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object13TypeInfo_Enum6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object13TypeInfo_Enum7compareMxFIPvIQdZi@Base 12
+ _D6object13TypeInfo_Enum7destroyMxFPvZv@Base 12
+ _D6object13TypeInfo_Enum7getHashMxFNbNfMxPvZm@Base 12
+ _D6object13TypeInfo_Enum8opEqualsMFC6ObjectZb@Base 12
+ _D6object13TypeInfo_Enum8postblitMxFPvZv@Base 12
+ _D6object13TypeInfo_Enum8toStringMxFNaNbNfZAya@Base 12
+ _D6object14OffsetTypeInfo11__xopEqualsMxFKxSQBqQBmZb@Base 12
+ _D6object14OffsetTypeInfo6__initZ@Base 12
+ _D6object14OffsetTypeInfo9__xtoHashFNbNeKxSQBpQBlZm@Base 12
+ _D6object14TypeInfo_Array11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object14TypeInfo_Array4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object14TypeInfo_Array4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Array5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Array5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Array6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Array6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object14TypeInfo_Array6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Array7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Array7getHashMxFNbNeMxPvZm@Base 12
+ _D6object14TypeInfo_Array8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Array8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Class10ClassFlags6__initZ@Base 12
+ _D6object14TypeInfo_Class11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object14TypeInfo_Class4findFMxAaZxCQBd@Base 12
+ _D6object14TypeInfo_Class4infoMxFNaNbNdNiNjNfZxCQBn@Base 12
+ _D6object14TypeInfo_Class5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Class5offTiMxFNaNbNdZAxSQBq14OffsetTypeInfo@Base 12
+ _D6object14TypeInfo_Class5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Class6createMxFZC6Object@Base 12
+ _D6object14TypeInfo_Class6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Class6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object14TypeInfo_Class7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Class7getHashMxFNbNeMxPvZm@Base 12
+ _D6object14TypeInfo_Class8isBaseOfMxFNaNbNiNeMxCQBnZb@Base 12
+ _D6object14TypeInfo_Class8opEqualsMxFNbNfxC8TypeInfoZb@Base 12
+ _D6object14TypeInfo_Class8toStringMxFNaNbNfZAya@Base 12
+ _D6object14TypeInfo_Class8typeinfoMxFNaNbNdNiNjNfZxCQBr@Base 12
+ _D6object14TypeInfo_Const11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object14TypeInfo_Const4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object14TypeInfo_Const4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Const5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object14TypeInfo_Const5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Const6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Const6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Const7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Const7getHashMxFNbNfMxPvZm@Base 12
+ _D6object14TypeInfo_Const8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Const8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Inout8toStringMxFNbNfZAya@Base 12
+ _D6object14TypeInfo_Tuple11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object14TypeInfo_Tuple4swapMxFPvQcZv@Base 12
+ _D6object14TypeInfo_Tuple5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Tuple6equalsMxFIPvIQdZb@Base 12
+ _D6object14TypeInfo_Tuple6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object14TypeInfo_Tuple7compareMxFIPvIQdZi@Base 12
+ _D6object14TypeInfo_Tuple7destroyMxFPvZv@Base 12
+ _D6object14TypeInfo_Tuple7getHashMxFNbNfMxPvZm@Base 12
+ _D6object14TypeInfo_Tuple8opEqualsMFC6ObjectZb@Base 12
+ _D6object14TypeInfo_Tuple8postblitMxFPvZv@Base 12
+ _D6object14TypeInfo_Tuple8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Shared8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Struct11StructFlags6__initZ@Base 12
+ _D6object15TypeInfo_Struct11_memberFunc6__initZ@Base 12
+ _D6object15TypeInfo_Struct11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object15TypeInfo_Struct4nameMxFNbNdNeZ19demangledNamesCacheHPxvAya@Base 12
+ _D6object15TypeInfo_Struct4nameMxFNbNdNeZAya@Base 12
+ _D6object15TypeInfo_Struct5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object15TypeInfo_Struct5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Struct6equalsMxFNaNbNeIPvIQdZb@Base 12
+ _D6object15TypeInfo_Struct6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object15TypeInfo_Struct6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Struct6toHashMxFNbNfZm@Base 12
+ _D6object15TypeInfo_Struct7compareMxFNaNbNeIPvIQdZi@Base 12
+ _D6object15TypeInfo_Struct7destroyMxFPvZv@Base 12
+ _D6object15TypeInfo_Struct7getHashMxFNaNbNeMxPvZm@Base 12
+ _D6object15TypeInfo_Struct8opEqualsMFC6ObjectZb@Base 12
+ _D6object15TypeInfo_Struct8postblitMxFPvZv@Base 12
+ _D6object15TypeInfo_Struct8toStringMxFNbNfZAya@Base 12
+ _D6object15TypeInfo_Vector11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object15TypeInfo_Vector4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object15TypeInfo_Vector4swapMxFPvQcZv@Base 12
+ _D6object15TypeInfo_Vector5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object15TypeInfo_Vector5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Vector6equalsMxFIPvIQdZb@Base 12
+ _D6object15TypeInfo_Vector6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object15TypeInfo_Vector7compareMxFIPvIQdZi@Base 12
+ _D6object15TypeInfo_Vector7getHashMxFNbNfMxPvZm@Base 12
+ _D6object15TypeInfo_Vector8opEqualsMFC6ObjectZb@Base 12
+ _D6object15TypeInfo_Vector8toStringMxFNbNfZAya@Base 12
+ _D6object16TypeInfo_Pointer11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object16TypeInfo_Pointer4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object16TypeInfo_Pointer4swapMxFPvQcZv@Base 12
+ _D6object16TypeInfo_Pointer5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object16TypeInfo_Pointer5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object16TypeInfo_Pointer6equalsMxFIPvIQdZb@Base 12
+ _D6object16TypeInfo_Pointer7compareMxFIPvIQdZi@Base 12
+ _D6object16TypeInfo_Pointer7getHashMxFNbNeMxPvZm@Base 12
+ _D6object16TypeInfo_Pointer8opEqualsMFC6ObjectZb@Base 12
+ _D6object16TypeInfo_Pointer8toStringMxFNbNfZAya@Base 12
+ _D6object17TypeInfo_Delegate11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object17TypeInfo_Delegate5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object17TypeInfo_Delegate5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Delegate6equalsMxFIPvIQdZb@Base 12
+ _D6object17TypeInfo_Delegate6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object17TypeInfo_Delegate6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Delegate7compareMxFIPvIQdZi@Base 12
+ _D6object17TypeInfo_Delegate7getHashMxFNbNeMxPvZm@Base 12
+ _D6object17TypeInfo_Delegate8opEqualsMFC6ObjectZb@Base 12
+ _D6object17TypeInfo_Delegate8toStringMxFNaNbNeZAya@Base 12
+ _D6object17TypeInfo_Function11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object17TypeInfo_Function5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object17TypeInfo_Function6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object17TypeInfo_Function8opEqualsMFC6ObjectZb@Base 12
+ _D6object17TypeInfo_Function8toStringMxFNaNbNeZAya@Base 12
+ _D6object18TypeInfo_Interface11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object18TypeInfo_Interface5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object18TypeInfo_Interface5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object18TypeInfo_Interface6equalsMxFIPvIQdZb@Base 12
+ _D6object18TypeInfo_Interface7compareMxFIPvIQdZi@Base 12
+ _D6object18TypeInfo_Interface7getHashMxFNbNeMxPvZm@Base 12
+ _D6object18TypeInfo_Interface8isBaseOfMxFNaNbNiNeMxC14TypeInfo_ClassZb@Base 12
+ _D6object18TypeInfo_Interface8isBaseOfMxFNaNbNiNeMxCQBrZb@Base 12
+ _D6object18TypeInfo_Interface8opEqualsMFC6ObjectZb@Base 12
+ _D6object18TypeInfo_Interface8toStringMxFNaNbNfZAya@Base 12
+ _D6object18TypeInfo_Invariant8toStringMxFNbNfZAya@Base 12
+ _D6object19__cpp_type_info_ptr6__initZ@Base 12
+ _D6object19__cpp_type_info_ptr6__vtblZ@Base 12
+ _D6object19__cpp_type_info_ptr7__ClassZ@Base 12
+ _D6object20TypeInfo_StaticArray11initializerMxFNaNbNiNfZAxv@Base 12
+ _D6object20TypeInfo_StaticArray4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object20TypeInfo_StaticArray4swapMxFPvQcZv@Base 12
+ _D6object20TypeInfo_StaticArray5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object20TypeInfo_StaticArray5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object20TypeInfo_StaticArray6equalsMxFIPvIQdZb@Base 12
+ _D6object20TypeInfo_StaticArray6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object20TypeInfo_StaticArray6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object20TypeInfo_StaticArray7compareMxFIPvIQdZi@Base 12
+ _D6object20TypeInfo_StaticArray7destroyMxFPvZv@Base 12
+ _D6object20TypeInfo_StaticArray7getHashMxFNbNeMxPvZm@Base 12
+ _D6object20TypeInfo_StaticArray8opEqualsMFC6ObjectZb@Base 12
+ _D6object20TypeInfo_StaticArray8postblitMxFPvZv@Base 12
+ _D6object20TypeInfo_StaticArray8toStringMxFNbNfZAya@Base 12
+ _D6object25TypeInfo_AssociativeArray11initializerMxFNaNbNiNeZAxv@Base 12
+ _D6object25TypeInfo_AssociativeArray4nextMNgFNaNbNdNiZNgC8TypeInfo@Base 12
+ _D6object25TypeInfo_AssociativeArray5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object25TypeInfo_AssociativeArray5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object25TypeInfo_AssociativeArray6equalsMxFNeIPvIQdZb@Base 12
+ _D6object25TypeInfo_AssociativeArray6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object25TypeInfo_AssociativeArray7getHashMxFNbNeMxPvZm@Base 12
+ _D6object25TypeInfo_AssociativeArray8opEqualsMFC6ObjectZb@Base 12
+ _D6object25TypeInfo_AssociativeArray8toStringMxFNbNfZAya@Base 12
+ _D6object2AA6__initZ@Base 12
+ _D6object5Error6__ctorMFNaNbNiNfAyaCQBi9ThrowableZCQBxQBt@Base 12
+ _D6object5Error6__ctorMFNaNbNiNfAyaQdmCQBl9ThrowableZCQCaQBw@Base 12
+ _D6object5Error6__initZ@Base 12
+ _D6object5Error6__vtblZ@Base 12
+ _D6object5Error7__ClassZ@Base 12
+ _D6object6Object5opCmpMFCQqZi@Base 12
+ _D6object6Object6toHashMFNbNeZm@Base 12
+ _D6object6Object7Monitor11__InterfaceZ@Base 12
+ _D6object6Object7factoryFAyaZCQv@Base 12
+ _D6object6Object8opEqualsMFCQtZb@Base 12
+ _D6object6Object8toStringMFZAya@Base 12
+ _D6object7AARange6__initZ@Base 12
+ _D6object7_xopCmpFIPvIQdZb@Base 12
+ _D6object8TypeInfo4nextMNgFNaNbNdNiZNgCQBe@Base 12
+ _D6object8TypeInfo4swapMxFPvQcZv@Base 12
+ _D6object8TypeInfo5flagsMxFNaNbNdNiNfZk@Base 12
+ _D6object8TypeInfo5offTiMxFZAxSQBd14OffsetTypeInfo@Base 12
+ _D6object8TypeInfo5opCmpMFC6ObjectZi@Base 12
+ _D6object8TypeInfo5tsizeMxFNaNbNdNiNfZm@Base 12
+ _D6object8TypeInfo6equalsMxFIPvIQdZb@Base 12
+ _D6object8TypeInfo6rtInfoMxFNaNbNdNiNfZPyv@Base 12
+ _D6object8TypeInfo6talignMxFNaNbNdNiNfZm@Base 12
+ _D6object8TypeInfo6toHashMxFNbNeZm@Base 12
+ _D6object8TypeInfo7compareMxFIPvIQdZi@Base 12
+ _D6object8TypeInfo7destroyMxFPvZv@Base 12
+ _D6object8TypeInfo7getHashMxFNbNeMxPvZm@Base 12
+ _D6object8TypeInfo8opEqualsMFC6ObjectZb@Base 12
+ _D6object8TypeInfo8opEqualsMxFNbNfxCQBbZb@Base 12
+ _D6object8TypeInfo8postblitMxFPvZv@Base 12
+ _D6object8TypeInfo8toStringMxFNbNfZAya@Base 12
+ _D6object9Exception6__ctorMFNaNbNiNfAyaCQBm9ThrowableQrmZCQBx@Base 12
+ _D6object9Exception6__ctorMFNaNbNiNfAyaQdmCQBp9ThrowableZCQBx@Base 12
+ _D6object9Interface11__xopEqualsMxFKxSQBkQBgZb@Base 12
+ _D6object9Interface6__initZ@Base 12
+ _D6object9Interface9__xtoHashFNbNeKxSQBjQBfZm@Base 12
+ _D6object9Throwable13chainTogetherFNaNbNiNkMCQBrQBnNkMQkZQn@Base 12
+ _D6object9Throwable4nextMFNaNbNdNiNlNfCQBlQBhZv@Base 12
+ _D6object9Throwable4nextMNgFNaNbNdNiNjNfZNgCQBqQBm@Base 12
+ _D6object9Throwable6__ctorMFNaNbNiNfAyaCQBmQBiZQi@Base 12
+ _D6object9Throwable6__ctorMFNaNbNiNfAyaQdmCQBpQBlZQi@Base 12
+ _D6object9Throwable6__dtorMFNbNeZv@Base 12
+ _D6object9Throwable6__initZ@Base 12
+ _D6object9Throwable6__vtblZ@Base 12
+ _D6object9Throwable7__ClassZ@Base 12
+ _D6object9Throwable7messageMxFNbNfZAxa@Base 12
+ _D6object9Throwable7opApplyMFMDFCQBfQBbZiZi@Base 12
+ _D6object9Throwable8refcountMFNaNbNcNiNjZk@Base 12
+ _D6object9Throwable8toStringMFZAya@Base 12
+ _D6object9Throwable8toStringMxFMDFIAaZvZv@Base 12
+ _D6object9Throwable9TraceInfo11__InterfaceZ@Base 12
+ _D6object__T10RTInfoImplVAmA2i104i1281ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i104i2048ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i104i2472ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i104i7083ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i10922ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i11178ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i11274ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i1225ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i3ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i424ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i5462ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i721ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i8192ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i112i8616ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i16424ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i21610ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i21844ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i4ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i5462ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i120i7920ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i11304ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i12161ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i12ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i15784ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i43690ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i512ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i128i54610ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i12i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i1188ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i20507ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i21928ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i2728ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i43178ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i136i76461ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i137i4900ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i144i76461ZQBcyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i152i262144ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i152i347816ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i152i349524ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i156i4900ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i160i873844ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i168i1397802ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i168i2ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i168i4244ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i169i112492ZQBdyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i16i3ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i176i2970996ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i176i3931280ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i177i3931280ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i184i2763412ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i184i6990180ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i192i12319888ZQBfyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i192i2763412ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i192i3331414ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i192i8388608ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i200i22364842ZQBfyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i200i23767396ZQBfyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i200i3331414ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i204i1448ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i208i41943044ZQBfyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i216i41943044ZQBfyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i216i8011774ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i224i183150948ZQBgyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i232i1448ZQBbyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i3ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i6ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i24i7ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i256i8388608ZQBeyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i257i2859116900ZQBhyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i272i1ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i272i2158144171ZQBhyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i28i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i328i730183585960ZQBjyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i11ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i12ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i13ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i14ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i15ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i3ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i7ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i8ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i32i9ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i36i8ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i397i93744818902396ZQBlyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i11ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i15ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i16ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i18ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i20ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i21ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i22ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i23ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i24ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i30ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i31ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i40i8ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i44i12ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i15ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i16ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i20ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i24ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i31ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i32ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i42ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i44ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i45ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i56ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i59ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i48i63ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i123ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i1ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i21ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i24ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i28ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i40ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i42ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i64ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i80ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i56i84ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i10ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i128ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i134ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i168ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i176ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i34ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i60ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i64i9ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i168ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i171ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i256ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i296ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i336ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i4ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i72i5ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i76i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i168ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i241ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i248ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i2ZQxyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i512ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i516ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i546ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i80i808ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i1028ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i1048ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i1448ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i1462ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i18ZQyyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i88i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i8i1ZQwyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i1023ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i1154ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i3496ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i96i424ZQzyG2m@Base 12
+ _D6object__T10RTInfoImplVAmA2i97i2728ZQBayG2m@Base 12
+ _D6object__T10RTInfoImplVAmA3i968i268435462i0ZQBiyG3m@Base 12
+ _D6object__T10RTInfoImplVAmA4i1064i549755827528i0i16ZQBpyG4m@Base 12
+ _D6object__T10RTInfoImplVAmA4i1088i549755827528i0i240ZQBqyG4m@Base 12
+ _D6object__T10RTInfoImplVAmA4i1152i144107491482206208i565149010231808i0ZQCiyG4m@Base 12
+ _D6object__T10_aaToRangeHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBzFNaNbNiNfKQByZSQDb7AARange@Base 12
+ _D6object__T16assumeSafeAppendTE3std3uni__T16UnicodeSetParserTSQBf5regex8internal6parser__T6ParserTAyaTSQCuQBpQBmQBg7CodeGenZQBiZQDi8OperatorZQFaFNbNcKNgAEQEtQEs__TQErTQEcZQEzQBrZNgQBc@Base 12
+ _D6object__T16assumeSafeAppendTS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBhZQCqFNbNcKNgASQCjQCi__TQChTQBvZQCpZNgQz@Base 12
+ _D6object__T16assumeSafeAppendTS3std5regex8internal9kickstart__T7ShiftOrTaZQl11ShiftThreadZQDbFNbNcKNgASQCuQCtQCqQCk__TQCdTaZQCjQBzZNgQBg@Base 12
+ _D6object__T16assumeSafeAppendTS3std8typecons__T5TupleTkTkTkZQnZQCaFNbNcKNgASQBtQBs__TQBmTkTkTkZQBwZNgQBb@Base 12
+ _D6object__T16assumeSafeAppendTaZQvFNbNgAaZNgQf@Base 12
+ _D6object__T16assumeSafeAppendTkZQvFNbNcKNgAkZNgQf@Base 12
+ _D6object__T3dupTAyaZQjFNaNbNdNfAxAyaZAQw@Base 12
+ _D6object__T3dupTS3std5regex8internal2ir8BytecodeZQBmFNaNbNdNfAxSQBvQBuQBrQBlQBlZAQCn@Base 12
+ _D6object__T3dupTaZQhFNaNbNdNfAxaZAa@Base 12
+ _D6object__T3dupThZQhFNaNbNdNfAxhZAh@Base 12
+ _D6object__T3dupTkZQhFNaNbNdNfAxkZAk@Base 12
+ _D6object__T3dupTmZQhFNaNbNdNfAxmZAm@Base 12
+ _D6object__T3getTmTmZQjFNaNfNgHmmmLNgmZNgm@Base 12
+ _D6object__T4_dupTS3std8datetime8timezone13PosixTimeZone10LeapSecondTySQCaQBzQBtQBnQBbZQCxFNaNbNeMAQDdZAyQBj@Base 12
+ _D6object__T4_dupTS3std8datetime8timezone13PosixTimeZone10TransitionTySQCaQBzQBtQBnQBbZQCxFNaNbNeMAQDdZAyQBj@Base 12
+ _D6object__T4_dupTaTyaZQlFNaNbNeMAaZAya@Base 12
+ _D6object__T4_dupTxAyaTAyaZQpFNaNbNeMAxQuZAQu@Base 12
+ _D6object__T4_dupTxS3std5regex8internal2ir8BytecodeTSQBhQBgQBdQxQwZQCdFNaNbNeMAxQCjZAQBh@Base 12
+ _D6object__T4_dupTxaTaZQlFNaNbNeMAxaZAa@Base 12
+ _D6object__T4_dupTxaTyaZQmFNaNbNeMAxaZAya@Base 12
+ _D6object__T4_dupTxhThZQlFNaNbNeMAxhZAh@Base 12
+ _D6object__T4_dupTxhTyhZQmFNaNbNeMAxhZAyh@Base 12
+ _D6object__T4_dupTxkTkZQlFNaNbNeMAxkZAk@Base 12
+ _D6object__T4_dupTxmTmZQlFNaNbNeMAxmZAm@Base 12
+ _D6object__T4_dupTyaTyaZQmFNaNbNeMAyaZQe@Base 12
+ _D6object__T4idupTS3std8datetime8timezone13PosixTimeZone10LeapSecondZQCfFNaNbNdNfAQCmZAySQCsQCrQClQCfQBt@Base 12
+ _D6object__T4idupTS3std8datetime8timezone13PosixTimeZone10TransitionZQCfFNaNbNdNfAQCmZAySQCsQCrQClQCfQBt@Base 12
+ _D6object__T4idupTaZQiFNaNbNdNfAaZAya@Base 12
+ _D6object__T4idupTxaZQjFNaNbNdNfAxaZAya@Base 12
+ _D6object__T4idupTxhZQjFNaNbNdNfAxhZAyh@Base 12
+ _D6object__T4idupTyaZQjFNaNbNdNfAyaZQe@Base 12
+ _D6object__T4keysHTHC4core6thread8osthread6ThreadQBdTQBhTQBlZQBxFNaNbNdNfQCcZAQCg@Base 12
+ _D6object__T4keysHTHS3std11concurrency3TidbTbTQBaZQBmFNaNbNdNfQBrZAQBv@Base 12
+ _D6object__T5clearTAyaTQeZQoFNaNbHQpQrZv@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result4saveMFNaNbNdNiNfZSQDv__TQDrHTQDmTQDpTQDqZQEiFNaNbNiNfQEkZQCn@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result5emptyMFNaNbNdNiNfZb@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result5frontMFNaNbNcNdNiNeZQCz@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result6__initZ@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result8popFrontMFNaNbNiNfZv@Base 12
+ _D6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZSQCw__TQCsHTQCnTQCqTQCrZQDjFNaNbNiNfQDlZ6Result@Base 12
+ _D6object__T7destroyVbi1TC4core2gc11gcinterface2GCZQBnFNbQBgZv@Base 12
+ _D6object__T7destroyVbi1TC6ObjectZQwFNbQoZv@Base 12
+ _D6object__T7destroyVbi1TS3gcc8sections3elf9ThreadDSOZQBqFNaNbNiNfKQBqZv@Base 12
+ _D6object__T7destroyVbi1TS3std11concurrency7MessageZQBoFNfKQBiZv@Base 12
+ _D6object__T7destroyVbi1TS3std12experimental9allocator15building_blocks15stats_collector__T14StatsCollectorTSQDfQDeQCtQCm6region__T6RegionTSQEkQEjQDy14mmap_allocator13MmapAllocatorVki16VEQGf8typecons__T4FlagVAyaa13_67726f77446f776e7761726473ZQBoi0ZQEnVmi4096Vmi0ZQGqZQJvFNaNbNiKQJtZv@Base 12
+ _D6object__T7destroyVbi1TS3std3net4curl3FTP4ImplZQBlFKQBdZv@Base 12
+ _D6object__T7destroyVbi1TS3std3net4curl4HTTP4ImplZQBmFKQBeZv@Base 12
+ _D6object__T7destroyVbi1TS3std3net4curl4SMTP4ImplZQBmFKQBeZv@Base 12
+ _D6object__T7destroyVbi1TS3std4file15DirIteratorImplZQBpFNfKQBjZv@Base 12
+ _D6object__T7destroyVbi1TS3std5stdio4FileZQBeFNfKQyZv@Base 12
+ _D6object__T7destroyVbi1TS4core2gc11gcinterface4RootZQBpFNaNbNiNfKQBpZv@Base 12
+ _D6object__T7destroyVbi1TS4core2gc11gcinterface5RangeZQBqFNaNbNiNfKQBqZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4NodeZQDoFNaNbNiNfKQDoZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4NodeZQDqFNaNbNiNfKQDqZv@Base 12
+ _D6object__T7destroyVbi1TS4core8internal9container7hashtab__T7HashTabTPySQCt10ModuleInfoTiZQBe4NodeZQDkFNaNbNiNfKQDkZv@Base 12
+ _D6object__T7reserveTS3std5regex8internal2ir8BytecodeZQBqFNaNbNeKAQBtmZm@Base 12
+ _D6object__T7reserveTaZQlFNaNbNeKAamZm@Base 12
+ _D6object__T7reserveTuZQlFNaNbNeKAumZm@Base 12
+ _D6object__T8_dupCtfeTS3std8datetime8timezone13PosixTimeZone10LeapSecondTySQCaQBzQBtQBnQBbZQDbFNaNbNfMAQDdZAyQBj@Base 12
+ _D6object__T8_dupCtfeTS3std8datetime8timezone13PosixTimeZone10TransitionTySQCaQBzQBtQBnQBbZQDbFNaNbNfMAQDdZAyQBj@Base 12
+ _D6object__T8_dupCtfeTaTyaZQpFNaNbNfMAaZAya@Base 12
+ _D6object__T8_dupCtfeTxAyaTAyaZQtFNaNbNfMAxQuZAQu@Base 12
+ _D6object__T8_dupCtfeTxS3std5regex8internal2ir8BytecodeTSQBhQBgQBdQxQwZQChFNaNbNfMAxQCjZAQBh@Base 12
+ _D6object__T8_dupCtfeTxaTaZQpFNaNbNfMAxaZAa@Base 12
+ _D6object__T8_dupCtfeTxaTyaZQqFNaNbNfMAxaZAya@Base 12
+ _D6object__T8_dupCtfeTxhThZQpFNaNbNfMAxhZAh@Base 12
+ _D6object__T8_dupCtfeTxhTyhZQqFNaNbNfMAxhZAyh@Base 12
+ _D6object__T8_dupCtfeTxkTkZQpFNaNbNfMAxkZAk@Base 12
+ _D6object__T8_dupCtfeTxmTmZQpFNaNbNfMAxmZAm@Base 12
+ _D6object__T8_dupCtfeTyaTyaZQqFNaNbNfMAyaZQe@Base 12
+ _D6object__T8capacityTAyaZQoFNaNbNdNeAQqZm@Base 12
+ _D6object__T8capacityTC3std3zip13ArchiveMemberZQBjFNaNbNdNeAQBmZm@Base 12
+ _D6object__T8capacityTS3std6socket11AddressInfoZQBkFNaNbNdNeAQBnZm@Base 12
+ _D6object__T8capacityTaZQmFNaNbNdNeAaZm@Base 12
+ _D6object__T8capacityThZQmFNaNbNdNeAhZm@Base 12
+ _D6object__T8capacityTlZQmFNaNbNdNeAlZm@Base 12
+ _D6object__T8opEqualsTC14TypeInfo_ClassTQsZQBfFNbNfQBdQBgZb@Base 12
+ _D6object__T8opEqualsTC3std11concurrency10MessageBoxTQBfZQBtFQBnQBqZb@Base 12
+ _D6object__T8opEqualsTC6ObjectTQjZQwFQpQrZb@Base 12
+ _D6object__T8opEqualsTC8TypeInfoTC14TypeInfo_ConstZQBnFQBhQzZb@Base 12
+ _D6object__T8opEqualsTC8TypeInfoTC15TypeInfo_StructZQBoFQBiQBaZb@Base 12
+ _D6object__T8opEqualsTC8TypeInfoTQlZQyFNbNfQvQxZb@Base 12
+ _D6object__T8opEqualsTC8TypeInfoTxCQmZQBaFNbNfQyxQpZb@Base 12
+ _D6object__T8opEqualsTxC14TypeInfo_ClassTxQtZQBhFNbNfxQBfxQBjZb@Base 12
+ _D6object__T8opEqualsTxC15TypeInfo_StructTxQuZQBiFxQBcxQBgZb@Base 12
+ _D6object__T8opEqualsTxC3std11concurrency10MessageBoxTxQBgZQBvFxQBpxQBtZb@Base 12
+ _D6object__T8opEqualsTxC3std11parallelism8TaskPoolTxQBdZQBsFxQBmxQBqZb@Base 12
+ _D6object__T8opEqualsTxC3std12experimental6logger4core6LoggerTxQBoZQCdFxQBxxQCbZb@Base 12
+ _D6object__T8opEqualsTxC3std3zip13ArchiveMemberTxQBaZQBpFxQBjxQBnZb@Base 12
+ _D6object__T8opEqualsTxC3std5regex8internal2ir__T7MatcherTaZQlTxQBpZQCeFxQByxQCcZb@Base 12
+ _D6object__T8opEqualsTxC3std6socket7AddressTxQwZQBkFxQBexQBiZb@Base 12
+ _D6object__T8opEqualsTxC3std7process3PidTxQtZQBhFxQBbxQBfZb@Base 12
+ _D6object__T8opEqualsTxC3std8datetime8timezone8TimeZoneTxQBiZQBxFxQBrxQBvZb@Base 12
+ _D6object__T8opEqualsTxC6ObjectTxQkZQyFxQrxQuZb@Base 12
+ _D6object__T8opEqualsTxC8TypeInfoTxQmZQBaFNbNfxQyxQBbZb@Base 12
+ _D6object__T8opEqualsTxCQw9ThrowableTxQpZQBdFxQxxQBaZb@Base 12
+ _D70TypeInfo_G14PxS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D70TypeInfo_PxS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D70TypeInfo_S3std3uni__T10assumeSizeS_DQBaQz8midlow_8FNaNbNiNfkZkVmi8ZQBu6__initZ@Base 12
+ _D70TypeInfo_S3std6format4spec__T10singleSpecTyaZQqFAyaZ16DummyOutputRange6__initZ@Base 12
+ _D70TypeInfo_xG14PS4core8internal2gc4impl12conservativeQw15SmallObjectPool6__initZ@Base 12
+ _D70TypeInfo_xPS4core8internal2gc4impl12conservativeQw3Gcx14ScanThreadData6__initZ@Base 12
+ _D70TypeInfo_xS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks6__initZ@Base 12
+ _D71TypeInfo_E3std8typecons__T4FlagVAyaa14_6b656570536570617261746f7273ZQBq6__initZ@Base 12
+ _D71TypeInfo_E3std8typecons__T4FlagVAyaa14_6b6565705465726d696e61746f72ZQBq6__initZ@Base 12
+ _D71TypeInfo_S3std5regex8internal8thompson__T10ThreadListTmZQp11ThreadRange6__initZ@Base 12
+ _D71TypeInfo_S3std8typecons__T5TupleTkVAyaa4_64617461TmVQra5_636f756e74ZQBq6__initZ@Base 12
+ _D71TypeInfo_S4core8internal8lifetime__T10emplaceRefTAyaTQeTQhZQxFKQoKQrZ1S6__initZ@Base 12
+ _D72TypeInfo_S3std6format__T7sformatTaTykTykTkTkTkZQxFNkMAaMAxaykykkkkZ4Sink6__initZ@Base 12
+ _D72TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi0ZQp6__initZ@Base 12
+ _D72TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T9ScanRangeVbi1ZQp6__initZ@Base 12
+ _D73TypeInfo_S3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh6__initZ@Base 12
+ _D74TypeInfo_S3std12experimental9allocator8showcase14mmapRegionListFmZ7Factory6__initZ@Base 12
+ _D74TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTbVmi1ZQrVmi1ZQBy6__initZ@Base 12
+ _D74TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi7ZQrVmi8ZQBy6__initZ@Base 12
+ _D74TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi8ZQrVmi8ZQBy6__initZ@Base 12
+ _D74TypeInfo_S3std8internal14unicode_tables__T9TrieEntryTbVii7Vii4Vii4Vii6ZQBd6__initZ@Base 12
+ _D74TypeInfo_S4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D74TypeInfo_xS3std9algorithm9iteration__T9MapResultSQBm5ascii7toLowerTAxaZQBh6__initZ@Base 12
+ _D75TypeInfo_E3std8typecons__T4FlagVAyaa16_616c6c6f774461794f766572666c6f77ZQBu6__initZ@Base 12
+ _D75TypeInfo_S2rt5minfo11ModuleGroup9sortCtorsMFAyaZ8findDepsMFmPmZ10stackFrame6__initZ@Base 12
+ _D75TypeInfo_S3std8typecons__T10RebindableTyCQBf8datetime8timezone8TimeZoneZQBu6__initZ@Base 12
+ _D75TypeInfo_xS3std12experimental9allocator8showcase14mmapRegionListFmZ7Factory6__initZ@Base 12
+ _D75TypeInfo_xS4core8internal2gc4impl12conservativeQw3Gcx__T11ToScanStackTPvZQr6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi11ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi12ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi13ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi14ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi15ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T13PackedPtrImplTSQBcQBb__T9BitPackedTkVmi16ZQsVmi16ZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std3uni__T5StackTSQtQr__T13InversionListTSQBrQBq8GcPolicyZQBhZQCa6__initZ@Base 12
+ _D76TypeInfo_S3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi6__initZ@Base 12
+ _D77TypeInfo_S3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAxkZQp6__initZ@Base 12
+ _D77TypeInfo_S3std8datetime4date4Date14isoWeekAndYearMxFNaNbNdNfZ14ISOWeekAndYear6__initZ@Base 12
+ _D77TypeInfo_S4core8demangle__T6mangleTFNbNiZmZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D77TypeInfo_S4core8demangle__T6mangleTFNbNiZvZQqFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D77TypeInfo_S4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D77TypeInfo_xS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh__T9IntervalsTAkZQo6__initZ@Base 12
+ _D77TypeInfo_xS3std5regex8internal6parser__T6ParserTAyaTSQBqQBpQBmQBg7CodeGenZQBi6__initZ@Base 12
+ _D78TypeInfo_S3std8typecons__T5TupleTC15TypeInfo_StructTPSQBs11concurrency3TidZQBx6__initZ@Base 12
+ _D78TypeInfo_S3std8typecons__T5TupleTiVAyaa6_737461747573TQtVQwa6_6f7574707574ZQBx6__initZ@Base 12
+ _D78TypeInfo_S4core8demangle__T6mangleTFNbNiZPvZQrFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D78TypeInfo_xS4core8internal9container5array__T5ArrayTPS3gcc8sections3elf3DSOZQBf6__initZ@Base 12
+ _D79TypeInfo_E3std8typecons__T4FlagVAyaa18_616c6c4b6e6f776e53616d654c656e677468ZQBy6__initZ@Base 12
+ _D79TypeInfo_E3std8typecons__T4FlagVAyaa18_707265736572766541747472696275746573ZQBy6__initZ@Base 12
+ _D79TypeInfo_S4core8demangle__T6mangleTFNbNiPvZvZQsFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D79TypeInfo_S4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D79TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D80TypeInfo_E3std12experimental9allocator15building_blocks15stats_collector7Options6__initZ@Base 12
+ _D80TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTbVmi1ZQrVmi1ZQCe6__initZ@Base 12
+ _D80TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi7ZQrVmi8ZQCe6__initZ@Base 12
+ _D80TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi8ZQrVmi8ZQCe6__initZ@Base 12
+ _D80TypeInfo_S3std8typecons__T5TupleTSQy5range__T4TakeTSQBqQt__T6RepeatTaZQkZQBcZQBz6__initZ@Base 12
+ _D80TypeInfo_S4core8demangle__T6mangleTFNbNiPvZQdZQtFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D80TypeInfo_S4core8internal5array7casting__T11__ArrayCastThTuZQsFNaNiNeNkMAhZ5Array6__initZ@Base 12
+ _D80TypeInfo_S4core8internal5array7casting__T11__ArrayCastThTwZQsFNaNiNeNkMAhZ5Array6__initZ@Base 12
+ _D80TypeInfo_S4core8internal5array7casting__T11__ArrayCastTvTmZQsFNaNiNeNkMAvZ5Array6__initZ@Base 12
+ _D80TypeInfo_S4core8internal9container5array__T5ArrayTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D80TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D80TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh6__initZ@Base 12
+ _D81TypeInfo_E3std8typecons__T4FlagVAyaa19_7573655265706c6163656d656e744463686172ZQCa6__initZ@Base 12
+ _D81TypeInfo_S3std6random__T24LinearCongruentialEngineTkVki16807Vki0Vki2147483647ZQCc6__initZ@Base 12
+ _D81TypeInfo_S3std6random__T24LinearCongruentialEngineTkVki48271Vki0Vki2147483647ZQCc6__initZ@Base 12
+ _D81TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi6__initZ@Base 12
+ _D82TypeInfo_S3std12experimental9allocator15building_blocks15bitmapped_block9BitVector6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi11ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi12ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi13ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi14ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi15ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S3std3uni__T19PackedArrayViewImplTSQBiQBh__T9BitPackedTkVmi16ZQsVmi16ZQCg6__initZ@Base 12
+ _D82TypeInfo_S4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container5array__T5ArrayTS3gcc8sections3elf9ThreadDSOZQBk6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi6__initZ@Base 12
+ _D82TypeInfo_S4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi6__initZ@Base 12
+ _D83TypeInfo_AS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D83TypeInfo_E3std3uni__T21genericDecodeGraphemeVbi0Z__TQBfTAxaZQBnFKQjZ13GraphemeState6__initZ@Base 12
+ _D83TypeInfo_E3std3uni__T21genericDecodeGraphemeVbi0Z__TQBfTAxwZQBnFKQjZ13GraphemeState6__initZ@Base 12
+ _D83TypeInfo_S3std5regex8internal2ir__T14BackLooperImplTSQBrQBqQBnQBh__T5InputTaZQjZQBt6__initZ@Base 12
+ _D83TypeInfo_S4core8internal5array7casting__T11__ArrayCastTxhTxuZQuFNaNiNeNkMAxhZ5Array6__initZ@Base 12
+ _D83TypeInfo_S4core8internal5array7casting__T11__ArrayCastTxhTxwZQuFNaNiNeNkMAxhZ5Array6__initZ@Base 12
+ _D83TypeInfo_xS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D84TypeInfo_AxS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D84TypeInfo_S4core8demangle__T6mangleTFNbMDFNbPvZvZvZQxFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D84TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface4RootZQBh4Node6__initZ@Base 12
+ _D84TypeInfo_S4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk6__initZ@Base 12
+ _D84TypeInfo_xAS4core8demangle15reencodeMangledFNaNbNfNkMAxaZ12PrependHooks11Replacement6__initZ@Base 12
+ _D85TypeInfo_S3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D85TypeInfo_S3std4conv__T7toCharsVii2TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D85TypeInfo_S3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TkZQBqFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D85TypeInfo_S3std4conv__T7toCharsVii8TaVEQBc5ascii10LetterCasei1TmZQBqFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D85TypeInfo_S3std5regex8internal2ir__T5RegexTaZQj13namedCapturesMFNdNfZ15NamedGroupRange6__initZ@Base 12
+ _D85TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAaZQqFQgZ14ByCodeUnitImplZQCe6__initZ@Base 12
+ _D85TypeInfo_S4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D86TypeInfo_S3std12experimental9allocator15building_blocks14null_allocator13NullAllocator6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TiZQBrFNaNbNiNfiZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TlZQBrFNaNbNiNflZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii10TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei0TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TkZQBrFNaNbNiNfkZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std4conv__T7toCharsVii16TaVEQBd5ascii10LetterCasei1TmZQBrFNaNbNiNfmZ6Result6__initZ@Base 12
+ _D86TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAxaZQrFQhZ14ByCodeUnitImplZQCf6__initZ@Base 12
+ _D86TypeInfo_S3std8typecons__T5TupleTSQy3utf__T10byCodeUnitTAyaZQrFQhZ14ByCodeUnitImplZQCf6__initZ@Base 12
+ _D86TypeInfo_S4core8demangle__T6mangleTFNbNiAyakQeQgmZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D86TypeInfo_S4core8demangle__T6mangleTFNbPvMDFNbQhZiZvZQzFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D86TypeInfo_xS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_PxS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_S3std8typecons__T5TupleTbVAyaa10_7465726d696e61746564TiVQBea6_737461747573ZQCg6__initZ@Base 12
+ _D87TypeInfo_S4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_S4core8internal9container7hashtab__T7HashTabTPyS6object10ModuleInfoTiZQBi4Node6__initZ@Base 12
+ _D87TypeInfo_xPS4core8internal9container5treap__T5TreapTSQBp2gc11gcinterface5RangeZQBi4Node6__initZ@Base 12
+ _D88TypeInfo_S3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6__initZ@Base 12
+ _D88TypeInfo_xS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D89TypeInfo_PxS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D89TypeInfo_S4core8demangle__T6mangleTFNbPvMDFNbQhQjZvZvZQBbFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D89TypeInfo_S4core8internal9container7hashtab__T7HashTabTPvTPS3gcc8sections3elf3DSOZQBk4Node6__initZ@Base 12
+ _D89TypeInfo_xPS4core8internal9container7hashtab__T7HashTabTAxaTS2rt9profilegc5EntryZQBi4Node6__initZ@Base 12
+ _D89TypeInfo_xS3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy6__initZ@Base 12
+ _D8TypeInfo6__initZ@Base 12
+ _D8TypeInfo6__vtblZ@Base 12
+ _D8TypeInfo7__ClassZ@Base 12
+ _D90TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo6__initZ@Base 12
+ _D90TypeInfo_S3std5range__T11SortedRangeTAkVAyaa5_61203c2062VEQBwQBv18SortedRangeOptionsi0ZQCm6__initZ@Base 12
+ _D90TypeInfo_S3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj6__initZ@Base 12
+ _D90TypeInfo_S6object__T7byValueHTHAyaC3std3zip13ArchiveMemberTQBcTQBdZQBvFNaNbNiNfQBxZ6Result6__initZ@Base 12
+ _D91TypeInfo_S3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn6__initZ@Base 12
+ _D91TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi13ZQsTSQCcQCb__TQBfTbVmi1ZQBpZQCo6__initZ@Base 12
+ _D91TypeInfo_xS3std8typecons__T10RebindableTxCQBf5regex8internal2ir__T14MatcherFactoryTaZQtZQCj6__initZ@Base 12
+ _D92TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq6__initZ@Base 12
+ _D92TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq6__initZ@Base 12
+ _D92TypeInfo_S3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq6__initZ@Base 12
+ _D92TypeInfo_S3std5range__T11SortedRangeTAkVAyaa6_61203c3d2062VEQByQBx18SortedRangeOptionsi0ZQCo6__initZ@Base 12
+ _D92TypeInfo_S4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D92TypeInfo_xS3std5range__T11SortedRangeTAAyaVQea5_61203c2062VEQBxQBw18SortedRangeOptionsi0ZQCn6__initZ@Base 12
+ _D93TypeInfo_HS3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBhSQBw5regex8internal2ir11CharMatcher6__initZ@Base 12
+ _D93TypeInfo_S3std3uni__T13InversionListTSQBcQBb8GcPolicyZQBh11byCodepointMFNdNfZ14CodepointRange6__initZ@Base 12
+ _D93TypeInfo_S4core8demangle__T8DemangleTSQBcQBa15reencodeMangledFNaNbNfNkMAxaZ12PrependHooksZQCl6__initZ@Base 12
+ _D93TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpThZQCq6__initZ@Base 12
+ _D93TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi15ZQBpTtZQCq6__initZ@Base 12
+ _D93TypeInfo_xS3std3uni__T10MultiArrayTSQzQx__T9BitPackedTkVmi8ZQrTSQCbQCa__TQBeTkVmi16ZQBpTtZQCq6__initZ@Base 12
+ _D93TypeInfo_xS4core8internal2gc9pooltable__T9PoolTableTSQBqQBoQBi4impl12conservativeQCe4PoolZQBx6__initZ@Base 12
+ _D94TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std11concurrency3TidTQxTQBaZQBrFKQBjKQBnZ1S6__initZ@Base 12
+ _D95TypeInfo_S3gcc8sections3elf18findDSOInfoForAddrFNbNiIPvPS4core3sys5linux4link12dl_phdr_infoZ2DG6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTaTSQBzQBxQBt__T11char_traitsTaZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTuTSQBzQBxQBt__T11char_traitsTuZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core6stdcpp11string_view__T17basic_string_viewTwTSQBzQBxQBt__T11char_traitsTwZQqZQCc6__initZ@Base 12
+ _D95TypeInfo_S4core8demangle__T6mangleTFNbNiAyaMDFNbNiQkZQnbZQrZQBhFNaNbNfNkMAxaNkMAaZ11DotSplitter6__initZ@Base 12
+ _D96TypeInfo_S3std5regex8internal2ir__T15SmallFixedArrayTSQBsQBrQBoQBi__T5GroupTmZQjVki3ZQBy7Payload6__initZ@Base 12
+ _D96TypeInfo_S4core8internal8lifetime__T10emplaceRefTC3std3zip13ArchiveMemberTQzTQBcZQBtFKQBlKQBpZ1S6__initZ@Base 12
+ _D97TypeInfo_S3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq6__initZ@Base 12
+ _D98TypeInfo_S3std5regex8internal8thompson__T15ThompsonMatcherTaTSQCaQBzQBw2ir__T5InputTaZQjZQBw5State6__initZ@Base 12
+ _D98TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std6socket11AddressInfoTQBaTQBeZQBvFKQBnKQBrZ1S6__initZ@Base 12
+ _D98TypeInfo_yS3std8typecons__T5TupleTEQy8encoding3BOMVAyaa6_736368656d61TAhVQwa8_73657175656e6365ZQCq6__initZ@Base 12
+ _D99TypeInfo_S4core8internal8lifetime__T10emplaceRefTS3std5regexQBt2ir8BytecodeTQBbTQBfZQBwFKQBoKQBsZ1S6__initZ@Base 12
+ _D9Exception6__initZ@Base 12
+ _D9Exception6__vtblZ@Base 12
+ _D9Exception7__ClassZ@Base 12
+ _D9invariant11__moduleRefZ@Base 12
+ _D9invariant12__ModuleInfoZ@Base 12
+ _D9invariant12_d_invariantFC6ObjectZv@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace7opApplyMxFMDFKxAaZiZi@Base 12
+ _DTi16_D3gcc9backtrace12LibBacktrace8toStringMxFZAya@Base 12
+ _DTi16_D3std11concurrency14FiberScheduler12newConditionMFNbC4core4sync5mutex5MutexZCQyQv9condition9Condition@Base 12
+ _DTi16_D3std11concurrency14FiberScheduler5spawnMFNbDFZvZv@Base 12
+ _DTi16_D3std11concurrency14FiberScheduler5startMFDFZvZv@Base 12
+ _DTi16_D3std11concurrency14FiberScheduler5yieldMFNbZv@Base 12
+ _DTi16_D3std11concurrency14FiberScheduler8thisInfoMFNbNcNdZSQCaQBz10ThreadInfo@Base 12
+ _DTi16_D3std11concurrency15ThreadScheduler12newConditionMFNbC4core4sync5mutex5MutexZCQyQv9condition9Condition@Base 12
+ _DTi16_D3std11concurrency15ThreadScheduler5spawnMFDFZvZv@Base 12
+ _DTi16_D3std11concurrency15ThreadScheduler5startMFDFZvZv@Base 12
+ _DTi16_D3std11concurrency15ThreadScheduler5yieldMFNbZv@Base 12
+ _DTi16_D3std11concurrency15ThreadScheduler8thisInfoMFNbNcNdZSQCbQCa10ThreadInfo@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator10deallocateMFNbAvZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator10reallocateMFNbKAvmZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator11allocateAllMFNbZAv@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator13deallocateAllMFNbZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator13goodAllocSizeMFNbmZm@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator15alignedAllocateMFNbmkZAv@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator17alignedReallocateMFNbKAvmkZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator22resolveInternalPointerMFNbxPvKAvZSQEj8typecons7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator4ownsMFNbAvZSQDm8typecons7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator5emptyMFNbZSQDl8typecons7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6decRefMFNaNbNiNfZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6expandMFNbKAvmZb@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator6incRefMFNaNbNiNfZv@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator8allocateMFNbmC8TypeInfoZAv@Base 12
+ _DTi16_D3std12experimental9allocator20setupThreadAllocatorFNbNcNiNfZ15ThreadAllocator9alignmentMFNbNdZk@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk10deallocateMOFNbAvZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk10reallocateMOFNbKAvmZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk11allocateAllMOFNbZAv@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk13deallocateAllMOFNbZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk13goodAllocSizeMOFNbmZm@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk15alignedAllocateMOFNbmkZAv@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk17alignedReallocateMOFNbKAvmkZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk22resolveInternalPointerMOFNbxPvKAvZSQHdQDm7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk4ownsMOFNbAvZSQGgQCp7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk5emptyMOFNbZSQGfQCo7Ternary@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6decRefMOFNaNbNiNeZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6expandMOFNbKAvmZb@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk6incRefMOFNaNbNiNfZv@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk8allocateMOFNbmC8TypeInfoZAv@Base 12
+ _DTi16_D3std12experimental9allocator__T20CSharedAllocatorImplTOxSQCfQCeQBt12gc_allocator11GCAllocatorVEQDr8typecons__T4FlagVAyaa8_696e646972656374ZQBdi0ZQEk9alignmentMOFNbNdZk@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm3dupMxFNeCQDyQDxQDuQDo__T7MatcherTaZQlIAaZCQFfQFeQFbQDp__TQDhTaTSQGbQGaQFxQFr__T5InputTaZQjZQEp@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6createMxFNeKxSQEdQEcQDzQDt__T5RegexTaZQjIAaZCQFiQFhQFeQDs__TQDkTaTSQGeQGdQGaQFu__T5InputTaZQjZQEs@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6decRefMxFNeCQEbQEaQDxQDr__T7MatcherTaZQlZm@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm12backtracking19BacktrackingMatcherTaZQCm6incRefMxFNfCQEbQEaQDxQDr__T7MatcherTaZQlZm@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd3dupMxFNeCQDpQDoQDlQDf__T7MatcherTaZQlIAaZCQEwQEvQEsQDg__TQDdTaTSQFsQFrQFoQFi__T5InputTaZQjZQEl@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6createMxFNeKxSQDuQDtQDqQDk__T5RegexTaZQjIAaZCQEzQEyQEvQDj__TQDgTaTSQFvQFuQFrQFl__T5InputTaZQjZQEo@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6decRefMxFNeCQDsQDrQDoQDi__T7MatcherTaZQlZm@Base 12
+ _DTi16_D3std5regex8internal2ir__T14GenericFactorySQBqQBpQBm8thompson15ThompsonMatcherTaZQCd6incRefMxFNfCQDsQDrQDoQDi__T7MatcherTaZQlZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe2md3MD5ZQBf6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki32Vmi3988292384ZQxZQCe6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN2882303761517117440ZQBgZQCo6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3crc__T3CRCVki64VmN3932672073523589310ZQBgZQCo6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki224ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki256ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki384ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki1024Vki512ZQsZQBz6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki160ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki224ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe3sha__T3SHAVki512Vki256ZQrZQBy6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp3putMFNbNeMAxhXv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp5resetMFNbNeZv@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6finishMFNbAhZQd@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6finishMFNbNeZAh@Base 12
+ _DTi16_D3std6digest__T13WrapperDigestTSQBfQBe6ripemd9RIPEMD160ZQBp6lengthMxFNaNbNdNeZm@Base 12
+ _DTi16_D4core4sync5mutex5Mutex4lockMFNeZv@Base 12
+ _DTi16_D4core4sync5mutex5Mutex6unlockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Reader4lockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Reader6unlockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Writer4lockMFNeZv@Base 12
+ _DTi16_D4core4sync7rwmutex14ReadWriteMutex6Writer6unlockMFNeZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC12profileStatsMFNbNiNeZSQDa6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC5queryMFNbPvZSQCq6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC5statsMFNbNiNfZSQCs6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkMxC8TypeInfoZSQDd6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC8rootIterMFNdNiZDFMDFNbKSQDbQCq11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl12conservativeQw14ConservativeGC9rangeIterMFNdNiZDFMDFNbKSQDcQCr11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC12profileStatsMFNbNiNfZSQCk6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC5queryMFNbPvZSQCa6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC5statsMFNbNiNfZSQCc6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6callocMFNbmkMxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6extendMFNbPvmmMxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6mallocMFNbmkMxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6qallocMFNbmkMxC8TypeInfoZSQCn6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7reallocMFNbPvmkMxC8TypeInfoZQr@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC8rootIterMFNdNiNjZDFMDFNbKSQCnQCc11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl5protoQo7ProtoGC9rangeIterMFNdNiNjZDFMDFNbKSQCoQCd11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC10removeRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC11inFinalizerMFNbNiNfZb@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC11removeRangeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC12profileStatsMFNbNiNfZSQCm6memory2GC12ProfileStats@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC13runFinalizersMFNbMxAvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC14collectNoStackMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC24allocatedInCurrentThreadMFNbZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC4freeMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC5queryMFNbPvZSQCc6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC5statsMFNbNiNfZSQCe6memory2GC5Stats@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6addrOfMFNbNiPvZQd@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6callocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6enableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6extendMFNbPvmmxC8TypeInfoZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6mallocMFNbmkxC8TypeInfoZPv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6qallocMFNbmkMxC8TypeInfoZSQCp6memory8BlkInfo_@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC6sizeOfMFNbNiPvZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7addRootMFNbNiPvZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7clrAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7collectMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7disableMFZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7getAttrMFNbPvZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7reallocMFNbPvmkxC8TypeInfoZQq@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7reserveMFNbmZm@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC7setAttrMFNbPvkZk@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8addRangeMFNbNiPvmxC8TypeInfoZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8minimizeMFNbZv@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC8rootIterMFNdNiNjZDFMDFNbKSQCpQCe11gcinterface4RootZiZi@Base 12
+ _DTi16_D4core8internal2gc4impl6manualQp8ManualGC9rangeIterMFNdNiNjZDFMDFNbKSQCqQCf11gcinterface5RangeZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo7opApplyMxFMDFKmKxAaZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo7opApplyMxFMDFKxAaZiZi@Base 12
+ _DTi16_D4core9exception17SuppressTraceInfo8toStringMxFZAya@Base 12
+ _ZNKSt10bad_typeid4whatEv@Base 12
+ _ZNKSt13bad_exception4whatEv@Base 12
+ _ZNKSt8bad_cast4whatEv@Base 12
+ _ZNKSt9exception4whatEv@Base 12
+ _ZNKSt9type_info4nameEv@Base 12
+ _ZNKSt9type_info6beforeEPKS_@Base 12
+ _ZNSt9bad_allocC1Ev@Base 12
+ _ZNSt9exceptionD1Ev@Base 12
+ _ZNSt9type_infoD1Ev@Base 12
+ __CPUELT@Base 12
+ __CPUMASK@Base 12
+ __CPU_ALLOC@Base 12
+ __CPU_ALLOC_SIZE@Base 12
+ __CPU_COUNT_S@Base 12
+ __CPU_FREE@Base 12
+ __CPU_ISSET_S@Base 12
+ __CPU_SET_S@Base 12
+ __atomic_add_fetch_16@Base 12
+ __atomic_add_fetch_1@Base 12
+ __atomic_add_fetch_2@Base 12
+ __atomic_add_fetch_4@Base 12
+ __atomic_add_fetch_8@Base 12
+ __atomic_and_fetch_16@Base 12
+ __atomic_and_fetch_1@Base 12
+ __atomic_and_fetch_2@Base 12
+ __atomic_and_fetch_4@Base 12
+ __atomic_and_fetch_8@Base 12
+ __atomic_compare_exchange@Base 12
+ __atomic_compare_exchange_16@Base 12
+ __atomic_compare_exchange_1@Base 12
+ __atomic_compare_exchange_2@Base 12
+ __atomic_compare_exchange_4@Base 12
+ __atomic_compare_exchange_8@Base 12
+ __atomic_exchange@Base 12
+ __atomic_exchange_16@Base 12
+ __atomic_exchange_1@Base 12
+ __atomic_exchange_2@Base 12
+ __atomic_exchange_4@Base 12
+ __atomic_exchange_8@Base 12
+ __atomic_feraiseexcept@Base 12
+ __atomic_fetch_add_16@Base 12
+ __atomic_fetch_add_1@Base 12
+ __atomic_fetch_add_2@Base 12
+ __atomic_fetch_add_4@Base 12
+ __atomic_fetch_add_8@Base 12
+ __atomic_fetch_and_16@Base 12
+ __atomic_fetch_and_1@Base 12
+ __atomic_fetch_and_2@Base 12
+ __atomic_fetch_and_4@Base 12
+ __atomic_fetch_and_8@Base 12
+ __atomic_fetch_nand_16@Base 12
+ __atomic_fetch_nand_1@Base 12
+ __atomic_fetch_nand_2@Base 12
+ __atomic_fetch_nand_4@Base 12
+ __atomic_fetch_nand_8@Base 12
+ __atomic_fetch_or_16@Base 12
+ __atomic_fetch_or_1@Base 12
+ __atomic_fetch_or_2@Base 12
+ __atomic_fetch_or_4@Base 12
+ __atomic_fetch_or_8@Base 12
+ __atomic_fetch_sub_16@Base 12
+ __atomic_fetch_sub_1@Base 12
+ __atomic_fetch_sub_2@Base 12
+ __atomic_fetch_sub_4@Base 12
+ __atomic_fetch_sub_8@Base 12
+ __atomic_fetch_xor_16@Base 12
+ __atomic_fetch_xor_1@Base 12
+ __atomic_fetch_xor_2@Base 12
+ __atomic_fetch_xor_4@Base 12
+ __atomic_fetch_xor_8@Base 12
+ __atomic_is_lock_free@Base 12
+ __atomic_load@Base 12
+ __atomic_load_16@Base 12
+ __atomic_load_1@Base 12
+ __atomic_load_2@Base 12
+ __atomic_load_4@Base 12
+ __atomic_load_8@Base 12
+ __atomic_nand_fetch_16@Base 12
+ __atomic_nand_fetch_1@Base 12
+ __atomic_nand_fetch_2@Base 12
+ __atomic_nand_fetch_4@Base 12
+ __atomic_nand_fetch_8@Base 12
+ __atomic_or_fetch_16@Base 12
+ __atomic_or_fetch_1@Base 12
+ __atomic_or_fetch_2@Base 12
+ __atomic_or_fetch_4@Base 12
+ __atomic_or_fetch_8@Base 12
+ __atomic_store@Base 12
+ __atomic_store_16@Base 12
+ __atomic_store_1@Base 12
+ __atomic_store_2@Base 12
+ __atomic_store_4@Base 12
+ __atomic_store_8@Base 12
+ __atomic_sub_fetch_16@Base 12
+ __atomic_sub_fetch_1@Base 12
+ __atomic_sub_fetch_2@Base 12
+ __atomic_sub_fetch_4@Base 12
+ __atomic_sub_fetch_8@Base 12
+ __atomic_test_and_set_16@Base 12
+ __atomic_test_and_set_1@Base 12
+ __atomic_test_and_set_2@Base 12
+ __atomic_test_and_set_4@Base 12
+ __atomic_test_and_set_8@Base 12
+ __atomic_xor_fetch_16@Base 12
+ __atomic_xor_fetch_1@Base 12
+ __atomic_xor_fetch_2@Base 12
+ __atomic_xor_fetch_4@Base 12
+ __atomic_xor_fetch_8@Base 12
+ __gdc_begin_catch@Base 12
+ __gdc_personality_v0@Base 12
+ _aApplyRcd1@Base 12
+ _aApplyRcd2@Base 12
+ _aApplyRcw1@Base 12
+ _aApplyRcw2@Base 12
+ _aApplyRdc1@Base 12
+ _aApplyRdc2@Base 12
+ _aApplyRdw1@Base 12
+ _aApplyRdw2@Base 12
+ _aApplyRwc1@Base 12
+ _aApplyRwc2@Base 12
+ _aApplyRwd1@Base 12
+ _aApplyRwd2@Base 12
+ _aApplycd1@Base 12
+ _aApplycd2@Base 12
+ _aApplycw1@Base 12
+ _aApplycw2@Base 12
+ _aApplydc1@Base 12
+ _aApplydc2@Base 12
+ _aApplydw1@Base 12
+ _aApplydw2@Base 12
+ _aApplywc1@Base 12
+ _aApplywc2@Base 12
+ _aApplywd1@Base 12
+ _aApplywd2@Base 12
+ _aaApply2@Base 12
+ _aaApply@Base 12
+ _aaClear@Base 12
+ _aaDelX@Base 12
+ _aaEqual@Base 12
+ _aaGetHash@Base 12
+ _aaGetRvalueX@Base 12
+ _aaGetX@Base 12
+ _aaGetY@Base 12
+ _aaInX@Base 12
+ _aaKeys@Base 12
+ _aaLen@Base 12
+ _aaRange@Base 12
+ _aaRangeEmpty@Base 12
+ _aaRangeFrontKey@Base 12
+ _aaRangeFrontValue@Base 12
+ _aaRangePopFront@Base 12
+ _aaRehash@Base 12
+ _aaValues@Base 12
+ _aaVersion@Base 12
+ _adEq2@Base 12
+ _adSort@Base 12
+ _d_allocmemory@Base 12
+ _d_arrayappendT@Base 12
+ _d_arrayappendcTX@Base 12
+ _d_arrayappendcd@Base 12
+ _d_arrayappendwd@Base 12
+ _d_arrayassign@Base 12
+ _d_arrayassign_l@Base 12
+ _d_arrayassign_r@Base 12
+ _d_arraybounds@Base 12
+ _d_arraybounds_index@Base 12
+ _d_arraybounds_indexp@Base 12
+ _d_arraybounds_slice@Base 12
+ _d_arraybounds_slicep@Base 12
+ _d_arrayboundsp@Base 12
+ _d_arraycatT@Base 12
+ _d_arraycatnTX@Base 12
+ _d_arraycopy@Base 12
+ _d_arrayctor@Base 12
+ _d_arrayliteralTX@Base 12
+ _d_arraysetassign@Base 12
+ _d_arraysetcapacity@Base 12
+ _d_arraysetctor@Base 12
+ _d_arraysetlengthT@Base 12
+ _d_arraysetlengthiT@Base 12
+ _d_arrayshrinkfit@Base 12
+ _d_assert@Base 12
+ _d_assert_msg@Base 12
+ _d_assertp@Base 12
+ _d_assocarrayliteralTX@Base 12
+ _d_callfinalizer@Base 12
+ _d_callinterfacefinalizer@Base 12
+ _d_createTrace@Base 12
+ _d_critical_init@Base 12
+ _d_critical_term@Base 12
+ _d_criticalenter2@Base 12
+ _d_criticalenter@Base 12
+ _d_criticalexit@Base 12
+ _d_delThrowable@Base 12
+ _d_delarray_t@Base 12
+ _d_delclass@Base 12
+ _d_delinterface@Base 12
+ _d_delmemory@Base 12
+ _d_delstruct@Base 12
+ _d_dso_registry@Base 12
+ _d_dynamic_cast@Base 12
+ _d_eh_swapContext@Base 12
+ _d_initMonoTime@Base 12
+ _d_interface_cast@Base 12
+ _d_isbaseof2@Base 12
+ _d_isbaseof@Base 12
+ _d_monitor_staticctor@Base 12
+ _d_monitor_staticdtor@Base 12
+ _d_monitordelete@Base 12
+ _d_monitordelete_nogc@Base 12
+ _d_monitorenter@Base 12
+ _d_monitorexit@Base 12
+ _d_newThrowable@Base 12
+ _d_newarrayT@Base 12
+ _d_newarrayU@Base 12
+ _d_newarrayiT@Base 12
+ _d_newarraymTX@Base 12
+ _d_newarraymiTX@Base 12
+ _d_newclass@Base 12
+ _d_newitemT@Base 12
+ _d_newitemU@Base 12
+ _d_newitemiT@Base 12
+ _d_print_throwable@Base 12
+ _d_register_conservative_gc@Base 12
+ _d_register_manual_gc@Base 12
+ _d_register_precise_gc@Base 12
+ _d_run_main2@Base 12
+ _d_run_main@Base 12
+ _d_setSameMutex@Base 12
+ _d_throw@Base 12
+ _d_toObject@Base 12
+ _d_traceContext@Base 12
+ _d_unittest@Base 12
+ _d_unittest_msg@Base 12
+ _d_unittestp@Base 12
+ atomic_flag_clear@Base 12
+ atomic_flag_clear_explicit@Base 12
+ atomic_flag_test_and_set@Base 12
+ atomic_flag_test_and_set_explicit@Base 12
+ atomic_signal_fence@Base 12
+ atomic_thread_fence@Base 12
+ backtrace_alloc@Base 12
+ backtrace_close@Base 12
+ backtrace_create_state@Base 12
+ backtrace_dwarf_add@Base 12
+ backtrace_free@Base 12
+ backtrace_full@Base 12
+ backtrace_get_view@Base 12
+ backtrace_initialize@Base 12
+ backtrace_open@Base 12
+ backtrace_pcinfo@Base 12
+ backtrace_print@Base 12
+ backtrace_qsort@Base 12
+ backtrace_release_view@Base 12
+ backtrace_simple@Base 12
+ backtrace_syminfo@Base 12
+ backtrace_syminfo_to_full_callback@Base 12
+ backtrace_syminfo_to_full_error_callback@Base 12
+ backtrace_uncompress_lzma@Base 12
+ backtrace_uncompress_zdebug@Base 12
+ backtrace_vector_finish@Base 12
+ backtrace_vector_grow@Base 12
+ backtrace_vector_release@Base 12
+ cimag@Base 12
+ cimagf@Base 12
+ cimagl@Base 12
+ creald@Base 12
+ crealf@Base 12
+ creall@Base 12
+ deflateInit2@Base 12
+ deflateInit@Base 12
+ fakePureErrnoImpl@Base 12
+ fakePureReprintReal@Base 12
+ fiber_entryPoint@Base 12
+ fiber_switchContext@Base 12
+ gc_addRange@Base 12
+ gc_addRoot@Base 12
+ gc_addrOf@Base 12
+ gc_allocatedInCurrentThread@Base 12
+ gc_calloc@Base 12
+ gc_clrAttr@Base 12
+ gc_clrProxy@Base 12
+ gc_collect@Base 12
+ gc_disable@Base 12
+ gc_enable@Base 12
+ gc_extend@Base 12
+ gc_free@Base 12
+ gc_getAttr@Base 12
+ gc_getProxy@Base 12
+ gc_inFinalizer@Base 12
+ gc_init@Base 12
+ gc_init_nothrow@Base 12
+ gc_malloc@Base 12
+ gc_minimize@Base 12
+ gc_profileStats@Base 12
+ gc_qalloc@Base 12
+ gc_query@Base 12
+ gc_realloc@Base 12
+ gc_removeRange@Base 12
+ gc_removeRoot@Base 12
+ gc_reserve@Base 12
+ gc_runFinalizers@Base 12
+ gc_setAttr@Base 12
+ gc_setProxy@Base 12
+ gc_sizeOf@Base 12
+ gc_stats@Base 12
+ gc_term@Base 12
+ getErrno@Base 12
+ inflateBackInit@Base 12
+ inflateInit2@Base 12
+ inflateInit@Base 12
+ libat_lock_n@Base 12
+ libat_unlock_n@Base 12
+ lifetime_init@Base 12
+ onArrayIndexError@Base 12
+ onArraySliceError@Base 12
+ onAssertError@Base 12
+ onAssertErrorMsg@Base 12
+ onFinalizeError@Base 12
+ onForkError@Base 12
+ onInvalidMemoryOperationError@Base 12
+ onOutOfMemoryError@Base 12
+ onOutOfMemoryErrorNoGC@Base 12
+ onRangeError@Base 12
+ onUnicodeError@Base 12
+ onUnittestErrorMsg@Base 12
+ pcinfoCallback@Base 12
+ pcinfoErrorCallback@Base 12
+ perf_event_open@Base 12
+ profilegc_setlogfilename@Base 12
+ register_default_gcs@Base 12
+ rt_args@Base 12
+ rt_attachDisposeEvent@Base 12
+ rt_cArgs@Base 12
+ rt_cmdline_enabled@Base 12
+ rt_detachDisposeEvent@Base 12
+ rt_envvars_enabled@Base 12
+ rt_finalize2@Base 12
+ rt_finalize@Base 12
+ rt_finalizeFromGC@Base 12
+ rt_getCollectHandler@Base 12
+ rt_getTraceHandler@Base 12
+ rt_hasFinalizerInSegment@Base 12
+ rt_init@Base 12
+ rt_loadLibrary@Base 12
+ rt_moduleCtor@Base 12
+ rt_moduleDtor@Base 12
+ rt_moduleTlsCtor@Base 12
+ rt_moduleTlsDtor@Base 12
+ rt_options@Base 12
+ rt_setCollectHandler@Base 12
+ rt_setTraceHandler@Base 12
+ rt_term@Base 12
+ rt_trapExceptions@Base 12
+ rt_unloadLibrary@Base 12
+ runModuleUnitTests@Base 12
+ setErrno@Base 12
+ simpleCallback@Base 12
+ simpleErrorCallback@Base 12
+ syminfoCallback2@Base 12
+ syminfoCallback@Base 12
+ thread_attachThis@Base 12
+ thread_detachByAddr@Base 12
+ thread_detachInstance@Base 12
+ thread_detachThis@Base 12
+ thread_enterCriticalRegion@Base 12
+ thread_entryPoint@Base 12
+ thread_exitCriticalRegion@Base 12
+ thread_inCriticalRegion@Base 12
+ thread_init@Base 12
+ thread_isMainThread@Base 12
+ thread_joinAll@Base 12
+ thread_processGCMarks@Base 12
+ thread_resumeAll@Base 12
+ thread_resumeHandler@Base 12
+ thread_scanAll@Base 12
+ thread_scanAllType@Base 12
+ thread_setGCSignals@Base 12
+ thread_setThis@Base 12
+ thread_stackBottom@Base 12
+ thread_stackTop@Base 12
+ thread_suspendAll@Base 12
+ thread_suspendHandler@Base 12
+ thread_term@Base 12
+ tipc_addr@Base 12
+ tipc_cluster@Base 12
+ tipc_node@Base 12
+ tipc_zone@Base 12
--- /dev/null
+# DP: updates from the 12 branch up to 20231008 (19b4b319a21).
+
+LANG=C git diff --no-renames --src-prefix=a/src/ --dst-prefix=b/src/ \
+ 8fc1a49c9312b05d925b7d21f1d2145d70818151 19b4b319a21bfcd6da7fe8e9b5bbec003db26691 \
+ | awk '/^diff .*\.texi/ {skip=1; next} /^diff / { skip=0 } skip==0' \
+ | grep -v -E '^(diff|index)'
+
+--- a/src/gcc/ChangeLog
++++ b/src/gcc/ChangeLog
+@@ -1,3 +1,1280 @@
++2023-10-07 Andrew Pinski <pinskia@gmail.com>
++
++ Backported from master:
++ 2023-10-06 Andrew Pinski <pinskia@gmail.com>
++
++ PR middle-end/111699
++ * match.pd ((c ? a : b) op d, (c ? a : b) op (c ? d : e),
++ (v ? w : 0) ? a : b, c1 ? c2 ? a : b : b): Enable only for GIMPLE.
++
++2023-10-02 Pat Haugen <pthaugen@linux.ibm.com>
++
++ Backported from master:
++ 2023-09-19 Pat Haugen <pthaugen@linux.ibm.com>
++
++ * config/rs6000/rs6000.cc (rs6000_rtx_costs): Check whether the
++ modulo instruction is disabled.
++ * config/rs6000/rs6000.h (RS6000_DISABLE_SCALAR_MODULO): New.
++ * config/rs6000/rs6000.md (mod<mode>3, *mod<mode>3): Check it.
++ (define_expand umod<mode>3): New.
++ (define_insn umod<mode>3): Rename to *umod<mode>3 and check if the modulo
++ instruction is disabled.
++ (umodti3, modti3): Check if the modulo instruction is disabled.
++
++2023-09-29 Wilco Dijkstra <wilco.dijkstra@arm.com>
++
++ Backported from master:
++ 2023-09-28 Wilco Dijkstra <wilco.dijkstra@arm.com>
++
++ PR target/111121
++ * config/aarch64/aarch64.md (aarch64_movmemdi): Add new expander.
++ (movmemdi): Call aarch64_expand_cpymem_mops for correct expansion.
++ * config/aarch64/aarch64.cc (aarch64_expand_cpymem_mops): Add support
++ for memmove.
++ * config/aarch64/aarch64-protos.h (aarch64_expand_cpymem_mops): Add new
++ function.
++
++2023-09-26 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gimple-range-gori.cc (gori_compute::logical_combine): Add missing
++ return statement in the varying case.
++
++2023-09-20 Richard Sandiford <richard.sandiford@arm.com>
++
++ Backported from master:
++ 2023-09-15 Richard Sandiford <richard.sandiford@arm.com>
++
++ PR target/111411
++ * config/aarch64/aarch64.cc (aarch64_operands_ok_for_ldpstp): Require
++ the lower memory access to a mem-pair operand.
++
++2023-09-20 Richard Sandiford <richard.sandiford@arm.com>
++
++ Backported from master:
++ 2023-08-31 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.md (untyped_call): Emit a call_value
++ rather than a call. List each possible destination register
++ in the call pattern.
++
++2023-09-12 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/111340
++ * config/i386/i386.cc (output_pic_addr_const): Handle CONST_WIDE_INT.
++ Call output_addr_const for CASE_CONST_SCALAR_INT.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_save_regs_above_locals_p):
++ New function.
++ (aarch64_layout_frame): Use it to decide whether locals should
++ go above or below the saved registers.
++ (aarch64_expand_prologue): Update stack layout comment.
++ Emit a stack tie after the final adjustment.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::saved_regs_size)
++ (aarch64_frame::below_hard_fp_saved_regs_size): Delete.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Update accordingly.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::sve_save_and_probe)
++ (aarch64_frame::hard_fp_save_and_probe): New fields.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Initialize them.
++ Rather than asserting that a leaf function saves LR, instead assert
++ that a leaf function saves something.
++ (aarch64_get_separate_components): Prevent the chosen probe
++ registers from being individually shrink-wrapped.
++ (aarch64_allocate_and_probe_stack_space): Remove workaround for
++ probe registers that aren't at the bottom of the previous allocation.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_allocate_and_probe_stack_space):
++ Always probe the residual allocation at offset 1024, asserting
++ that that is in range.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Ensure that
++ the LR save slot is in the first 16 bytes of the register save area.
++ Only form STP/LDP push/pop candidates if both registers are valid.
++ (aarch64_allocate_and_probe_stack_space): Remove workaround for
++ when LR was not in the first 16 bytes.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_allocate_and_probe_stack_space):
++ Don't probe final allocations that are exactly 1KiB in size (after
++ unprobed space above the final allocation has been deducted).
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Tweak
++ calculation of initial_adjust for frames in which all saves
++ are SVE saves.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Simplify
++ the allocation of the top of the frame.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame): Add comment above
++ reg_offset.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Walk offsets
++ from the bottom of the frame, rather than the bottom of the saved
++ register area. Measure reg_offset from the bottom of the frame
++ rather than the bottom of the saved register area.
++ (aarch64_save_callee_saves): Update accordingly.
++ (aarch64_restore_callee_saves): Likewise.
++ (aarch64_get_separate_components): Likewise.
++ (aarch64_process_components): Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::frame_size): Tweak comment.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::hard_fp_offset): Rename
++ to...
++ (aarch64_frame::bytes_above_hard_fp): ...this.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame)
++ (aarch64_expand_prologue): Update accordingly.
++ (aarch64_initial_elimination_offset): Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::locals_offset): Rename to...
++ (aarch64_frame::bytes_above_locals): ...this.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame)
++ (aarch64_initial_elimination_offset): Update accordingly.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_expand_prologue): Move the
++ calculation of chain_offset into the emit_frame_chain block.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::callee_offset): Delete.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Remove
++ callee_offset handling.
++ (aarch64_save_callee_saves): Replace the start_offset parameter
++ with a bytes_below_sp parameter.
++ (aarch64_restore_callee_saves): Likewise.
++ (aarch64_expand_prologue): Update accordingly.
++ (aarch64_expand_epilogue): Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_hard_fp): New
++ field.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Initialize it.
++ (aarch64_expand_epilogue): Use it instead of
++ below_hard_fp_saved_regs_size.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_saved_regs): New
++ field.
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Initialize it,
++ and use it instead of crtl->outgoing_args_size.
++ (aarch64_get_separate_components): Use bytes_below_saved_regs instead
++ of outgoing_args_size.
++ (aarch64_process_components): Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_layout_frame): Explicitly
++ allocate the frame in one go if there are no saved registers.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_expand_prologue): Use
++ chain_offset rather than callee_offset.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * config/aarch64/aarch64.cc (aarch64_save_callee_saves): Use
++ a local shorthand for cfun->machine->frame.
++ (aarch64_restore_callee_saves, aarch64_get_separate_components):
++ (aarch64_process_components): Likewise.
++ (aarch64_allocate_and_probe_stack_space): Likewise.
++ (aarch64_expand_prologue, aarch64_expand_epilogue): Likewise.
++ (aarch64_layout_frame): Use existing shorthand for one more case.
++
++2023-09-12 Haochen Gui <guihaoc@gcc.gnu.org>
++
++ Backported from master:
++ 2023-08-31 Haochen Gui <guihaoc@gcc.gnu.org>
++
++ PR target/96762
++ * config/rs6000/rs6000-string.cc (expand_block_move): Call vector
++ load/store with length only on 64-bit Power10.
++
++2023-09-11 liuhongt <hongtao.liu@intel.com>
++
++ Backported from master:
++ 2023-09-11 liuhongt <hongtao.liu@intel.com>
++
++ PR target/111306
++ PR target/111335
++ * config/i386/sse.md (int_comm): New int_attr.
++ (fma_<complexopname>_<mode><sdc_maskz_name><round_name>):
++ Remove % for Complex conjugate operations since they're not
++ commutative.
++ (fma_<complexpairopname>_<mode>_pair): Ditto.
++ (<avx512>_<complexopname>_<mode>_mask<round_name>): Ditto.
++ (cmul<conj_op><mode>3): Ditto.
++
++2023-09-01 Tobias Burnus <tobias@codesourcery.com>
++
++ Backported from master:
++ 2023-08-19 Tobias Burnus <tobias@codesourcery.com>
++
++ PR middle-end/111017
++ * omp-expand.cc (expand_omp_for_init_vars): Pass after=true
++ to expand_omp_build_cond for 'factor != 0' condition, resulting
++ in pre-r12-5295-g47de0b56ee455e code for the gimple insert.
++
++2023-09-01 Lulu Cheng <chenglulu@loongson.cn>
++
++ Backported from master:
++ 2023-09-01 Lulu Cheng <chenglulu@loongson.cn>
++ Guo Jie <guojie@loongson.cn>
++
++ PR target/110484
++ * config/loongarch/loongarch.cc (loongarch_emit_stack_tie): Use the
++ frame_pointer_needed to determine whether to use the $fp register.
++
++2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/110914
++ * tree-ssa-strlen.cc (strlen_pass::handle_builtin_memcpy): Don't call
++ adjust_last_stmt unless len is known constant.
++
++2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/111015
++ * gimple-ssa-store-merging.cc
++ (imm_store_chain_info::output_merged_store): Use wi::mask and
++ wide_int_to_tree instead of unsigned HOST_WIDE_INT shift and
++ build_int_cst to build BIT_AND_EXPR mask.
++
++2023-08-19 Guo Jie <guojie@loongson.cn>
++
++ Backported from master:
++ 2023-08-19 Guo Jie <guojie@loongson.cn>
++ Lulu Cheng <chenglulu@loongson.cn>
++
++ * config/loongarch/t-loongarch: Add loongarch-driver.h into
++ TM_H. Add loongarch-def.h and loongarch-tune.h into
++ OPTIONS_H_EXTRA.
++
++2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ Backported from master:
++ 2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ * config/i386/i386-builtins.cc
++ (ix86_vectorize_builtin_gather): Adjust for use_gather_8parts.
++ * config/i386/i386-options.cc (parse_mtune_ctrl_str):
++ Set/Clear tune features use_{gather,scatter}_{2parts, 4parts,
++ 8parts} for -mtune-ctrl={,^}{use_gather,use_scatter}.
++ * config/i386/i386.cc (ix86_vectorize_builtin_scatter): Adjust
++ for use_scatter_8parts.
++ * config/i386/i386.h (TARGET_USE_GATHER): Rename to ..
++ (TARGET_USE_GATHER_8PARTS): .. this.
++ (TARGET_USE_SCATTER): Rename to ..
++ (TARGET_USE_SCATTER_8PARTS): .. this.
++ * config/i386/x86-tune.def (X86_TUNE_USE_GATHER): Rename to ..
++ (X86_TUNE_USE_GATHER_8PARTS): .. this.
++ (X86_TUNE_USE_SCATTER): Rename to ..
++ (X86_TUNE_USE_SCATTER_8PARTS): .. this.
++ * config/i386/i386.opt: Add new options mgather, mscatter.
++
++2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ Backported from master:
++ 2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ * config/i386/i386-options.cc (m_GDS): New macro.
++ * config/i386/x86-tune.def (X86_TUNE_USE_GATHER_2PARTS): Don't
++ enable for m_GDS.
++ (X86_TUNE_USE_GATHER_4PARTS): Ditto.
++ (X86_TUNE_USE_GATHER): Ditto.
++
++2023-08-09 liuhongt <hongtao.liu@intel.com>
++
++ * common/config/i386/cpuinfo.h (get_available_features): Check
++ max_subleaf_level for a valid subleaf before using CPUID.
++
++2023-08-01 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-07-26 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/110741
++ * config/rs6000/vsx.md (define_insn xxeval): Correct vsx
++ operands output with "x".
++
++2023-07-14 Uros Bizjak <ubizjak@gmail.com>
++
++ Backported from master:
++ 2023-07-14 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/110206
++ * fwprop.cc (contains_paradoxical_subreg_p): Move to ...
++ * rtlanal.cc (contains_paradoxical_subreg_p): ... here.
++ * rtlanal.h (contains_paradoxical_subreg_p): Add prototype.
++ * cprop.cc (try_replace_reg): Do not set REG_EQUAL note
++ when the original source contains a paradoxical subreg.
++
++2023-07-14 Oleg Endo <olegendo@gcc.gnu.org>
++
++ PR target/101469
++ * config/sh/sh.md (peephole2): Handle case where eliminated reg
++ is also used by the address of the following memory operand.
++
++2023-07-13 Uros Bizjak <ubizjak@gmail.com>
++
++ Backported from master:
++ 2023-07-13 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/106966
++ * config/alpha/alpha.cc (alpha_emit_set_long_const):
++ Always use DImode when constructing long const.
++
++2023-07-08 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-07-08 Jonathan Wakely <jwakely@redhat.com>
++
++ PR c++/110595
++ * doc/invoke.texi (Warning Options): Fix typo.
++
++2023-07-05 Michael Meissner <meissner@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-23 Michael Meissner <meissner@linux.ibm.com>
++ Aaron Sawdey <acsawdey@linux.ibm.com>
++
++ PR target/105325
++ * config/rs6000/genfusion.pl (gen_ld_cmpi_p10_one): Fix problems that
++ allowed prefixed lwa to be generated.
++ * config/rs6000/fusion.md: Regenerate.
++ * config/rs6000/predicates.md (ds_form_mem_operand): Delete.
++ * config/rs6000/rs6000.md (prefixed attribute): Add support for load
++ plus compare immediate fused insns.
++ (maybe_prefixed): Likewise.
++
++2023-07-05 Segher Boessenkool <segher@kernel.crashing.org>
++
++ Backported from master:
++ 2023-06-06 Segher Boessenkool <segher@kernel.crashing.org>
++
++ * config/rs6000/genfusion.pl (gen_ld_cmpi_p10_one): New, rewritten and
++ split out from...
++ (gen_ld_cmpi_p10): ... this.
++
++2023-07-04 Cui, Lili <lili.cui@intel.com>
++
++ * common/config/i386/cpuinfo.h (get_intel_cpu): Remove model value 0xa8
++ from Rocketlake, remove model value 0xbf from Alderlake.
++
++2023-06-30 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gimple-fold.cc (fold_array_ctor_reference): Fix head comment.
++ (fold_nonarray_ctor_reference): Likewise. Specifically deal
++ with integral bit-fields.
++ (fold_ctor_reference): Make sure that the constructor uses the
++ native storage order.
++
++2023-06-29 liuhongt <hongtao.liu@intel.com>
++
++ PR rtl-optimization/110237
++ * config/i386/sse.md (<avx512>_store<mode>_mask): Refine with
++ UNSPEC_MASKMOV.
++ (maskstore<mode><avx512fmaskmodelower>): Ditto.
++ (*<avx512>_store<mode>_mask): New define_insn, it's renamed
++ from original <avx512>_store<mode>_mask.
++
++2023-06-29 liuhongt <hongtao.liu@intel.com>
++
++ PR target/110309
++ * config/i386/sse.md (maskload<mode><avx512fmaskmodelower>):
++ Refine pattern with UNSPEC_MASKLOAD.
++ (maskload<mode><avx512fmaskmodelower>): Ditto.
++ (*<avx512>_load<mode>_mask): Extend mode iterator to
++ VI12HF_AVX512VL.
++ (*<avx512>_load<mode>): Ditto.
++
++2023-06-29 Hongyu Wang <hongyu.wang@intel.com>
++
++ Backported from master:
++ 2023-06-26 Hongyu Wang <hongyu.wang@intel.com>
++
++ * config/i386/i386-options.cc (ix86_valid_target_attribute_tree):
++ Override tune_string with arch_string if tune_string is not
++ explicitly specified.
++
++2023-06-28 Thomas Schwinge <thomas@codesourcery.com>
++
++ Backported from master:
++ 2023-06-02 Thomas Schwinge <thomas@codesourcery.com>
++
++ PR testsuite/66005
++ * doc/install.texi: Document (optional) Perl usage for parallel
++ testing of libgomp.
++
++2023-06-28 liuhongt <hongtao.liu@intel.com>
++
++ * config/i386/i386-features.cc (pass_insert_vzeroupper::gate):
++ Move flag_expensive_optimizations && !optimize_size to ..
++ * config/i386/i386-options.cc (ix86_option_override_internal):
++ .. this, which makes -mvzeroupper independent of the optimization
++ level but still keeps the behavior of architecture
++ tuning (emit_vzeroupper) unchanged.
++
++2023-06-27 Andrew Pinski <apinski@marvell.com>
++
++ Backported from master:
++ 2023-06-27 Andrew Pinski <apinski@marvell.com>
++
++ PR middle-end/110420
++ PR middle-end/103979
++ PR middle-end/98619
++ * gimplify.cc (gimplify_asm_expr): Mark asm with labels as volatile.
++
++2023-06-23 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-06-19 Richard Biener <rguenther@suse.de>
++
++ PR tree-optimization/110298
++ * tree-ssa-loop-ivcanon.cc (tree_unroll_loops_completely):
++ Clear number of iterations info before cleaning up the CFG.
++
++2023-06-23 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-06-09 Richard Biener <rguenther@suse.de>
++
++ PR middle-end/110182
++ * match.pd (two conversions in a row): Use element_precision
++ to DTRT for VECTOR_TYPE.
++
++2023-06-22 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-06-07 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/110132
++ * config/aarch64/aarch64-builtins.cc (aarch64_general_simulate_builtin):
++ New. Use it ...
++ (aarch64_init_ls64_builtins): ... here. Switch to declaring public ACLE
++ names for builtins.
++ (aarch64_general_init_builtins): Ensure we invoke the arm_acle.h
++ setup if in_lto_p, just like we do for SVE.
++ * config/aarch64/arm_acle.h: (__arm_ld64b): Delete.
++ (__arm_st64b): Delete.
++ (__arm_st64bv): Delete.
++ (__arm_st64bv0): Delete.
++
++2023-06-22 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-06-07 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/110100
++ * config/aarch64/aarch64-builtins.cc (aarch64_expand_builtin_ls64):
++ Use input operand for the destination address.
++ * config/aarch64/aarch64.md (st64b): Fix constraint on address
++ operand.
++
++2023-06-22 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-06-07 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/110100
++ * config/aarch64/aarch64-builtins.cc (aarch64_init_ls64_builtins_types):
++ Replace eight consecutive spaces with tabs.
++ (aarch64_init_ls64_builtins): Likewise.
++ (aarch64_expand_builtin_ls64): Likewise.
++ * config/aarch64/aarch64.md (ld64b): Likewise.
++ (st64b): Likewise.
++ (st64bv): Likewise.
++ (st64bv0): Likewise.
++
++2023-06-20 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-12 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/109932
++ * config/rs6000/rs6000-builtins.def (__builtin_pack_vector_int128,
++ __builtin_unpack_vector_int128): Move from stanza power7 to vsx.
++
++2023-06-20 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-12 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/110011
++ * config/rs6000/rs6000.cc (output_toc): Use the mode of the 128-bit
++ floating constant itself for real_to_target call.
++
++2023-06-15 Lulu Cheng <chenglulu@loongson.cn>
++
++ Backported from master:
++ 2023-06-15 Lulu Cheng <chenglulu@loongson.cn>
++ Andrew Pinski <apinski@marvell.com>
++
++ PR target/110136
++ * config/loongarch/loongarch.md: Modify the register constraints for the
++ templates "jumptable" and "indirect_jump" from "r" to "e".
++
++2023-06-12 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-06-12 Richard Biener <rguenther@suse.de>
++
++ PR middle-end/110200
++ * genmatch.cc (expr::gen_transform): Put braces around
++ the if arm for the (convert ...) short-cut.
++
++2023-06-10 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/109650
++ PR target/92729
++ Backport from 2023-05-10 master r14-1688.
++ * config/avr/avr-passes.def (avr_pass_ifelse): Insert new pass.
++ * config/avr/avr.cc (avr_pass_ifelse): New RTL pass.
++ (avr_pass_data_ifelse): New pass_data for it.
++ (make_avr_pass_ifelse, avr_redundant_compare, avr_cbranch_cost)
++ (avr_canonicalize_comparison, avr_out_plus_set_ZN)
++ (avr_out_cmp_ext): New functions.
++ (compare_condition): Make sure REG_CC dies in the branch insn.
++ (avr_rtx_costs_1): Add computation of cbranch costs.
++ (avr_adjust_insn_length) [ADJUST_LEN_ADD_SET_ZN, ADJUST_LEN_CMP_ZEXT]
++ [ADJUST_LEN_CMP_SEXT]: Handle them.
++ (TARGET_CANONICALIZE_COMPARISON): New define.
++ (avr_simplify_comparison_p, compare_diff_p, avr_compare_pattern)
++ (avr_reorg_remove_redundant_compare, avr_reorg): Remove functions.
++ (TARGET_MACHINE_DEPENDENT_REORG): Remove define.
++ * config/avr/avr-protos.h (avr_simplify_comparison_p): Remove proto.
++ (make_avr_pass_ifelse, avr_out_plus_set_ZN, cc_reg_rtx)
++ (avr_out_cmp_zext): New protos.
++ * config/avr/avr.md (branch, difficult_branch): Don't split insns.
++ ("*cbranchhi.zero-extend.0", "*cbranchhi.zero-extend.1")
++ (*swapped_tst<mode>, *add.for.eqne.<mode>): New insns.
++ (*cbranch<mode>4): Rename to cbranch<mode>4_insn.
++ (define_peephole): Add dead_or_set_regno_p(insn,REG_CC) as needed.
++ (define_peephole2): Add peep2_regno_dead_p(*,REG_CC) as needed.
++ Add new RTL peepholes for decrement-and-branch and *swapped_tst<mode>.
++ Rework signtest-and-branch peepholes for *sbrx_branch<mode>.
++ (adjust_len) [add_set_ZN, cmp_zext]: New.
++ (QIPSI): New mode iterator.
++ (ALLs1, ALLs2, ALLs4, ALLs234): New mode iterators.
++ (gelt): New code iterator.
++ (gelt_eqne): New code attribute.
++ (rvbranch, *rvbranch, difficult_rvbranch, *difficult_rvbranch)
++ (branch_unspec, *negated_tst<mode>, *reversed_tst<mode>)
++ (*cmpqi_sign_extend): Remove insns.
++ (define_c_enum "unspec") [UNSPEC_IDENTITY]: Remove.
++ * config/avr/avr-dimode.md (cbranch<mode>4): Canonicalize comparisons.
++ * config/avr/predicates.md (scratch_or_d_register_operand): New.
++ * config/avr/constraints.md (Yxx): New constraint.
++
++2023-06-09 Jeevitha Palanisamy <jeevitha@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-06 Jeevitha Palanisamy <jeevitha@linux.ibm.com>
++
++ PR target/106907
++ * config/rs6000/rs6000.cc (vec_const_128bit_to_bytes): Remove
++ duplicate expression.
++
++2023-06-09 Iain Sandoe <iain@sandoe.co.uk>
++
++ Backported from master:
++ 2023-06-02 Iain Sandoe <iain@sandoe.co.uk>
++
++ PR target/110044
++ * config/rs6000/rs6000.cc (darwin_rs6000_special_round_type_align):
++ Make sure that we do not have a cap on field alignment before altering
++ the struct layout based on the type alignment of the first entry.
++
++2023-06-09 liuhongt <hongtao.liu@intel.com>
++
++ PR target/110108
++ * config/i386/i386.cc (ix86_gimple_fold_builtin): Explicitly
++ view_convert_expr mask to signed type when folding pblendvb
++ builtins.
++
++2023-06-08 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-05-25 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/109800
++ * config/arm/arm.md (movdf): Generate temporary pseudo in DImode
++ instead of DFmode.
++ * config/arm/vfp.md (no_literal_pool_df_immediate): Rather than punning an
++ lvalue DFmode pseudo into DImode, use a DImode pseudo and pun it into
++ DFmode as an rvalue.
++
++2023-06-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
++
++ Backported from master:
++ 2023-05-24 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
++
++ PR target/109939
++ * config/arm/arm-builtins.cc (SAT_BINOP_UNSIGNED_IMM_QUALIFIERS): Use
++ qualifier_none for the return operand.
++
++2023-06-02 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/110088
++ * config/avr/avr.md: Add an RTL peephole to optimize operations on
++ non-LD_REGS after a move from LD_REGS.
++ (piaop): New code iterator.
++
++2023-06-01 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-06-01 Jonathan Wakely <jwakely@redhat.com>
++
++ PR target/109954
++ * doc/invoke.texi (x86 Options): Fix description of -m32 option.
++
++2023-05-30 Andreas Schwab <schwab@suse.de>
++
++ PR target/110036
++ * config/riscv/riscv.cc (riscv_asan_shadow_offset): Update to
++ match libsanitizer.
++
++2023-05-25 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/104327
++ * config/avr/avr.cc (avr_can_inline_p): New static function.
++ (TARGET_CAN_INLINE_P): Define to that function.
++
++2023-05-25 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/82931
++ * config/avr/avr.md (*movbitqi.0): Rename to *movbit<mode>.0-6.
++ Handle any bit position and use mode QISI.
++ * config/avr/avr.cc (avr_rtx_costs_1) [IOR]: Return a cost
++ of 2 insns for bit-transfer of respective style.
++
++2023-05-23 Georg-Johann Lay <avr@gjlay.de>
++
++ * config/avr/avr.cc (avr_insn_cost): New static function.
++ (TARGET_INSN_COST): Define to that function.
++
++2023-05-22 Michael Meissner <meissner@linux.ibm.com>
++
++ PR target/70243
++ * config/rs6000/vsx.md (vsx_fmav4sf4): Do not generate vmaddfp.
++ (vsx_nfmsv4sf4): Do not generate vnmsubfp. Backport of the
++ 2023-04-10 change from master.
++
++2023-05-22 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-21 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/109505
++ * match.pd ((x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2),
++ Combine successive equal operations with constants,
++ (A +- CST1) +- CST2 -> A + CST3, (CST1 - A) +- CST2 -> CST3 - A,
++ CST1 - (CST2 - A) -> CST3 + A): Use ! on ops with 2 CONSTANT_CLASS_P
++ operands.
++
++2023-05-22 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-05-17 Kewen Lin <linkw@linux.ibm.com>
++
++ * tree-vect-loop.cc (vect_analyze_loop_1): Don't retry analysis with
++ suggested unroll factor once the previous analysis fails.
++
++2023-05-20 Triffid Hunter <triffid.hunter@gmail.com>
++
++ PR target/105753
++ Backport from 2023-05-20 https://gcc.gnu.org/r14-1016
++ * config/avr/avr.md (divmodpsi, udivmodpsi, divmodsi, udivmodsi):
++ Remove superfluous "parallel" in insn pattern.
++ ([u]divmod<mode>4): Tidy code. Use gcc_unreachable() instead of
++ printing error text to assembly.
++
++2023-05-18 Alexandre Oliva <oliva@adacore.com>
++
++ * config/arm/vfp.md (*thumb2_movsi_vfp): Drop blank after tab
++ after vmsr and vmrs, and lower the case of P0.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/arm_mve.h: (__ARM_mve_typeid): Add more pointer types.
++ (__ARM_mve_coerce1): Remove.
++ (__ARM_mve_coerce2): Remove.
++ (__ARM_mve_coerce3): Remove.
++ (__ARM_mve_coerce_i_scalar): New.
++ (__ARM_mve_coerce_s8_ptr): New.
++ (__ARM_mve_coerce_u8_ptr): New.
++ (__ARM_mve_coerce_s16_ptr): New.
++ (__ARM_mve_coerce_u16_ptr): New.
++ (__ARM_mve_coerce_s32_ptr): New.
++ (__ARM_mve_coerce_u32_ptr): New.
++ (__ARM_mve_coerce_s64_ptr): New.
++ (__ARM_mve_coerce_u64_ptr): New.
++ (__ARM_mve_coerce_f_scalar): New.
++ (__ARM_mve_coerce_f16_ptr): New.
++ (__ARM_mve_coerce_f32_ptr): New.
++ (__arm_vst4q): Change _coerce_ overloads.
++ (__arm_vbicq): Change _coerce_ overloads.
++ (__arm_vmulq): Change _coerce_ overloads.
++ (__arm_vcmpeqq): Change _coerce_ overloads.
++ (__arm_vcmpneq): Change _coerce_ overloads.
++ (__arm_vmaxnmavq): Change _coerce_ overloads.
++ (__arm_vmaxnmvq): Change _coerce_ overloads.
++ (__arm_vminnmavq): Change _coerce_ overloads.
++ (__arm_vsubq): Change _coerce_ overloads.
++ (__arm_vminnmvq): Change _coerce_ overloads.
++ (__arm_vrshlq): Change _coerce_ overloads.
++ (__arm_vqsubq): Change _coerce_ overloads.
++ (__arm_vqdmulltq): Change _coerce_ overloads.
++ (__arm_vqdmullbq): Change _coerce_ overloads.
++ (__arm_vqdmulhq): Change _coerce_ overloads.
++ (__arm_vqaddq): Change _coerce_ overloads.
++ (__arm_vhaddq): Change _coerce_ overloads.
++ (__arm_vhsubq): Change _coerce_ overloads.
++ (__arm_vqdmlashq): Change _coerce_ overloads.
++ (__arm_vqrdmlahq): Change _coerce_ overloads.
++ (__arm_vmlasq): Change _coerce_ overloads.
++ (__arm_vqdmlahq): Change _coerce_ overloads.
++ (__arm_vmaxnmavq_p): Change _coerce_ overloads.
++ (__arm_vmaxnmvq_p): Change _coerce_ overloads.
++ (__arm_vminnmavq_p): Change _coerce_ overloads.
++ (__arm_vminnmvq_p): Change _coerce_ overloads.
++ (__arm_vfmasq_m): Change _coerce_ overloads.
++ (__arm_vld1q): Change _coerce_ overloads.
++ (__arm_vld1q_z): Change _coerce_ overloads.
++ (__arm_vld2q): Change _coerce_ overloads.
++ (__arm_vld4q): Change _coerce_ overloads.
++ (__arm_vldrhq_gather_offset): Change _coerce_ overloads.
++ (__arm_vldrhq_gather_offset_z): Change _coerce_ overloads.
++ (__arm_vldrhq_gather_shifted_offset): Change _coerce_ overloads.
++ (__arm_vldrhq_gather_shifted_offset_z): Change _coerce_ overloads.
++ (__arm_vldrwq_gather_offset): Change _coerce_ overloads.
++ (__arm_vldrwq_gather_offset_z): Change _coerce_ overloads.
++ (__arm_vldrwq_gather_shifted_offset): Change _coerce_ overloads.
++ (__arm_vldrwq_gather_shifted_offset_z): Change _coerce_ overloads.
++ (__arm_vst1q_p): Change _coerce_ overloads.
++ (__arm_vst2q): Change _coerce_ overloads.
++ (__arm_vst1q): Change _coerce_ overloads.
++ (__arm_vstrhq): Change _coerce_ overloads.
++ (__arm_vstrhq_p): Change _coerce_ overloads.
++ (__arm_vstrhq_scatter_offset_p): Change _coerce_ overloads.
++ (__arm_vstrhq_scatter_offset): Change _coerce_ overloads.
++ (__arm_vstrhq_scatter_shifted_offset_p): Change _coerce_ overloads.
++ (__arm_vstrhq_scatter_shifted_offset): Change _coerce_ overloads.
++ (__arm_vstrwq_p): Change _coerce_ overloads.
++ (__arm_vstrwq): Change _coerce_ overloads.
++ (__arm_vstrwq_scatter_offset): Change _coerce_ overloads.
++ (__arm_vstrwq_scatter_offset_p): Change _coerce_ overloads.
++ (__arm_vstrwq_scatter_shifted_offset): Change _coerce_ overloads.
++ (__arm_vstrwq_scatter_shifted_offset_p): Change _coerce_ overloads.
++ (__arm_vsetq_lane): Change _coerce_ overloads.
++ (__arm_vcmpneq_m): Change _coerce_ overloads.
++ (__arm_vldrbq_gather_offset): Change _coerce_ overloads.
++ (__arm_vdwdupq_x_u8): Change _coerce_ overloads.
++ (__arm_vdwdupq_x_u16): Change _coerce_ overloads.
++ (__arm_vdwdupq_x_u32): Change _coerce_ overloads.
++ (__arm_viwdupq_x_u8): Change _coerce_ overloads.
++ (__arm_viwdupq_x_u16): Change _coerce_ overloads.
++ (__arm_viwdupq_x_u32): Change _coerce_ overloads.
++ (__arm_vidupq_x_u8): Change _coerce_ overloads.
++ (__arm_vddupq_x_u8): Change _coerce_ overloads.
++ (__arm_vidupq_x_u16): Change _coerce_ overloads.
++ (__arm_vddupq_x_u16): Change _coerce_ overloads.
++ (__arm_vidupq_x_u32): Change _coerce_ overloads.
++ (__arm_vddupq_x_u32): Change _coerce_ overloads.
++ (__arm_vhaddq_x): Change _coerce_ overloads.
++ (__arm_vhsubq_x): Change _coerce_ overloads.
++ (__arm_vldrdq_gather_offset): Change _coerce_ overloads.
++ (__arm_vldrdq_gather_offset_z): Change _coerce_ overloads.
++ (__arm_vldrdq_gather_shifted_offset): Change _coerce_ overloads.
++ (__arm_vldrdq_gather_shifted_offset_z): Change _coerce_ overloads.
++ (__arm_vldrbq_gather_offset_z): Change _coerce_ overloads.
++ (__arm_vqrdmlahq_m): Change _coerce_ overloads.
++ (__arm_vqrdmlashq_m): Change _coerce_ overloads.
++ (__arm_vqdmlashq_m): Change _coerce_ overloads.
++ (__arm_vmlaldavaxq_p): Change _coerce_ overloads.
++ (__arm_vmlasq_m): Change _coerce_ overloads.
++ (__arm_vqdmulhq_m): Change _coerce_ overloads.
++ (__arm_vqdmulltq_m): Change _coerce_ overloads.
++ (__arm_vidupq_u16): Change _coerce_ overloads.
++ (__arm_vidupq_u32): Change _coerce_ overloads.
++ (__arm_vidupq_u8): Change _coerce_ overloads.
++ (__arm_vddupq_u16): Change _coerce_ overloads.
++ (__arm_vddupq_u32): Change _coerce_ overloads.
++ (__arm_vddupq_u8): Change _coerce_ overloads.
++ (__arm_viwdupq_m): Change _coerce_ overloads.
++ (__arm_viwdupq_u16): Change _coerce_ overloads.
++ (__arm_viwdupq_u32): Change _coerce_ overloads.
++ (__arm_viwdupq_u8): Change _coerce_ overloads.
++ (__arm_vdwdupq_m): Change _coerce_ overloads.
++ (__arm_vdwdupq_u16): Change _coerce_ overloads.
++ (__arm_vdwdupq_u32): Change _coerce_ overloads.
++ (__arm_vdwdupq_u8): Change _coerce_ overloads.
++ (__arm_vaddlvaq): Change _coerce_ overloads.
++ (__arm_vaddlvaq_p): Change _coerce_ overloads.
++ (__arm_vaddvaq): Change _coerce_ overloads.
++ (__arm_vaddvaq_p): Change _coerce_ overloads.
++ (__arm_vcmphiq_m): Change _coerce_ overloads.
++ (__arm_vmladavaq_p): Change _coerce_ overloads.
++ (__arm_vmladavaxq): Change _coerce_ overloads.
++ (__arm_vmlaldavaxq): Change _coerce_ overloads.
++ (__arm_vstrbq): Change _coerce_ overloads.
++ (__arm_vstrbq_p): Change _coerce_ overloads.
++ (__arm_vrmlaldavhaq_p): Change _coerce_ overloads.
++ (__arm_vstrbq_scatter_offset): Change _coerce_ overloads.
++ (__arm_vstrbq_scatter_offset_p): Change _coerce_ overloads.
++ (__arm_vstrdq_scatter_offset_p): Change _coerce_ overloads.
++ (__arm_vstrdq_scatter_offset): Change _coerce_ overloads.
++ (__arm_vstrdq_scatter_shifted_offset_p): Change _coerce_ overloads.
++ (__arm_vstrdq_scatter_shifted_offset): Change _coerce_ overloads.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/arm_mve.h (__arm_vbicq): Change coerce on
++ scalar constant.
++ (__arm_vmvnq_m): Likewise.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/arm_mve.h (__arm_vorrq): Add _n variant.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/arm_mve.h (__arm_vadcq_s32): Fix arithmetic.
++ (__arm_vadcq_u32): Likewise.
++ (__arm_vadcq_m_s32): Likewise.
++ (__arm_vadcq_m_u32): Likewise.
++ (__arm_vsbcq_s32): Likewise.
++ (__arm_vsbcq_u32): Likewise.
++ (__arm_vsbcq_m_s32): Likewise.
++ (__arm_vsbcq_m_u32): Likewise.
++ * config/arm/mve.md (get_fpscr_nzcvqc): Make unspec_volatile.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vrndq_m_f<mode>, mve_vrev64q_f<mode>)
++ (mve_vrev32q_fv8hf, mve_vcvttq_f32_f16v4sf)
++ (mve_vcvtbq_f32_f16v4sf, mve_vcvtq_to_f_<supf><mode>)
++ (mve_vrev64q_<supf><mode>, mve_vcvtq_from_f_<supf><mode>)
++ (mve_vmovltq_<supf><mode>, mve_vmovlbq_<supf><mode>)
++ (mve_vcvtpq_<supf><mode>, mve_vcvtnq_<supf><mode>)
++ (mve_vcvtmq_<supf><mode>, mve_vcvtaq_<supf><mode>)
++ (mve_vmvnq_n_<supf><mode>, mve_vrev16q_<supf>v16qi)
++ (mve_vctp<mode1>qhi, mve_vbrsrq_n_f<mode>)
++ (mve_vbrsrq_n_<supf><mode>, mve_vandq_f<mode>, mve_vbicq_f<mode>)
++ (mve_vbicq_n_<supf><mode>, mve_vctp<mode1>q_mhi)
++ (mve_vcvtbq_f16_f32v8hf, mve_vcvttq_f16_f32v8hf)
++ (mve_veorq_f<mode>, mve_vmlaldavxq_s<mode>, mve_vmlsldavq_s<mode>)
++ (mve_vmlsldavxq_s<mode>, mve_vornq_f<mode>, mve_vorrq_f<mode>)
++ (mve_vrmlaldavhxq_sv4si, mve_vbicq_m_n_<supf><mode>)
++ (mve_vcvtq_m_to_f_<supf><mode>, mve_vshlcq_<supf><mode>)
++ (mve_vmvnq_m_<supf><mode>, mve_vpselq_<supf><mode>)
++ (mve_vcvtbq_m_f16_f32v8hf, mve_vcvtbq_m_f32_f16v4sf)
++ (mve_vcvttq_m_f16_f32v8hf, mve_vcvttq_m_f32_f16v4sf)
++ (mve_vmlaldavq_p_<supf><mode>, mve_vmlsldavaq_s<mode>)
++ (mve_vmlsldavaxq_s<mode>, mve_vmlsldavq_p_s<mode>)
++ (mve_vmlsldavxq_p_s<mode>, mve_vmvnq_m_n_<supf><mode>)
++ (mve_vorrq_m_n_<supf><mode>, mve_vpselq_f<mode>)
++ (mve_vrev32q_m_fv8hf, mve_vrev32q_m_<supf><mode>)
++ (mve_vrev64q_m_f<mode>, mve_vrmlaldavhaxq_sv4si)
++ (mve_vrmlaldavhxq_p_sv4si, mve_vrmlsldavhaxq_sv4si)
++ (mve_vrmlsldavhq_p_sv4si, mve_vrmlsldavhxq_p_sv4si)
++ (mve_vrev16q_m_<supf>v16qi, mve_vrmlaldavhq_p_<supf>v4si)
++ (mve_vrmlsldavhaq_sv4si, mve_vandq_m_<supf><mode>)
++ (mve_vbicq_m_<supf><mode>, mve_veorq_m_<supf><mode>)
++ (mve_vornq_m_<supf><mode>, mve_vorrq_m_<supf><mode>)
++ (mve_vandq_m_f<mode>, mve_vbicq_m_f<mode>, mve_veorq_m_f<mode>)
++ (mve_vornq_m_f<mode>, mve_vorrq_m_f<mode>)
++ (mve_vstrdq_scatter_shifted_offset_p_<supf>v2di_insn)
++ (mve_vstrdq_scatter_shifted_offset_<supf>v2di_insn)
++ (mve_vstrdq_scatter_base_wb_p_<supf>v2di) : Fix spacing and
++ capitalization in the emitted asm.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/constraints.md (mve_vldrd_immediate): Move it to
++ predicates.md.
++ (Ri): Move constraint definition from predicates.md.
++ (Rl): Define new constraint.
++ * config/arm/mve.md (mve_vstrwq_scatter_base_wb_p_<supf>v4si): Add
++ missing constraint.
++ (mve_vstrwq_scatter_base_wb_p_fv4sf): Add missing Up constraint
++ for op 1, use mve_vstrw_immediate predicate and Rl constraint for
++ op 2. Fix asm output spacing.
++ (mve_vstrdq_scatter_base_wb_p_<supf>v2di): Add missing constraint.
++ * config/arm/predicates.md (Ri): Move constraint to constraints.md.
++ (mve_vldrd_immediate): Move it from
++ constraints.md.
++ (mve_vstrw_immediate): New predicate.
++
++2023-05-18 Murray Steele <murray.steele@arm.com>
++
++ Backported from master:
++ 2023-01-18 Murray Steele <murray.steele@arm.com>
++
++ PR target/108442
++ * config/arm/arm_mve.h (__arm_vst1q_p_u8): Use prefixed intrinsic
++ function.
++ (__arm_vst1q_p_s8): Likewise.
++ (__arm_vld1q_z_u8): Likewise.
++ (__arm_vld1q_z_s8): Likewise.
++ (__arm_vst1q_p_u16): Likewise.
++ (__arm_vst1q_p_s16): Likewise.
++ (__arm_vld1q_z_u16): Likewise.
++ (__arm_vld1q_z_s16): Likewise.
++ (__arm_vst1q_p_u32): Likewise.
++ (__arm_vst1q_p_s32): Likewise.
++ (__arm_vld1q_z_u32): Likewise.
++ (__arm_vld1q_z_s32): Likewise.
++ (__arm_vld1q_z_f16): Likewise.
++ (__arm_vst1q_p_f16): Likewise.
++ (__arm_vld1q_z_f32): Likewise.
++ (__arm_vst1q_p_f32): Likewise.
++
++2023-05-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
++
++ Backported from master:
++ 2023-01-24 Andre Vieira <andre.simoesdiasvieira@arm.com>
++
++ PR target/108177
++ * config/arm/mve.md (mve_vstrbq_p_<supf><mode>, mve_vstrhq_p_fv8hf,
++ mve_vstrhq_p_<supf><mode>, mve_vstrwq_p_<supf>v4si): Add memory operand
++ as input operand.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-04-04 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/mve.md (mve_vcvtq_n_to_f_<supf><mode>): Swap operands.
++ (mve_vcreateq_f<mode>): Swap operands.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-01-16 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/96795
++ PR target/107515
++ * config/arm/arm_mve.h (__ARM_mve_coerce2): Split types.
++ (__ARM_mve_coerce3): Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vqnegq_s<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vqabsq_s<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vnegq_f<mode>, mve_vnegq_s<mode>):
++ Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (@mve_vclzq_s<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vclsq_s<mode>): Fix spacing.
++
++2023-05-18 Christophe Lyon <christophe.lyon@arm.com>
++
++ Backported from master:
++ 2023-02-03 Christophe Lyon <christophe.lyon@arm.com>
++
++ * config/arm/mve.md (mve_vabavq_p_<supf><mode>): Add length
++ attribute.
++ (mve_vqshluq_m_n_s<mode>): Likewise.
++ (mve_vshlq_m_<supf><mode>): Likewise.
++ (mve_vsriq_m_n_<supf><mode>): Likewise.
++ (mve_vsubq_m_<supf><mode>): Likewise.
++
++2023-05-18 Christophe Lyon <christophe.lyon@arm.com>
++
++ Backported from master:
++ 2022-10-03 Christophe Lyon <christophe.lyon@arm.com>
++
++ * config/arm/mve.md (mve_vrev64q_m_<supf><mode>): Add early
++ clobber.
++ (mve_vrev64q_m_f<mode>): Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vrmlaldavhq_<supf>v4si,
++ mve_vrmlaldavhaq_<supf>v4si): Fix spacing vs tabs.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vmlaldavaq_<supf><mode>)
++ (mve_vmlaldavaxq_s<mode>, mve_vmlaldavaxq_p_<supf><mode>): Fix
++ spacing vs tabs.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vsubq_n_f<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vaddlvq_p_<supf>v4si)
++ (mve_vaddq_n_<supf><mode>, mve_vaddvaq_<supf><mode>)
++ (mve_vaddlvaq_<supf>v4si, mve_vaddq_n_f<mode>)
++ (mve_vaddlvaq_p_<supf>v4si, mve_vaddq<mode>, mve_vaddq_f<mode>):
++ Fix spacing.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2022-11-28 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * config/arm/arm_mve.h (__arm_vsubq_x FP): New overloads.
++ (__arm_vsubq_x Integer): New.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2022-11-28 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/107515
++ * config/arm/arm_mve.h (__ARM_mve_typeid): Add float types.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2022-11-28 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/96795
++ * config/arm/arm_mve.h (__arm_vaddq): Fix Overloading.
++ (__arm_vmulq): Likewise.
++ (__arm_vcmpeqq): Likewise.
++ (__arm_vcmpneq): Likewise.
++ (__arm_vmaxnmavq): Likewise.
++ (__arm_vmaxnmvq): Likewise.
++ (__arm_vminnmavq): Likewise.
++ (__arm_vsubq): Likewise.
++ (__arm_vminnmvq): Likewise.
++ (__arm_vrshlq): Likewise.
++ (__arm_vqsubq): Likewise.
++ (__arm_vqdmulltq): Likewise.
++ (__arm_vqdmullbq): Likewise.
++ (__arm_vqdmulhq): Likewise.
++ (__arm_vqaddq): Likewise.
++ (__arm_vhaddq): Likewise.
++ (__arm_vhsubq): Likewise.
++ (__arm_vqdmlashq): Likewise.
++ (__arm_vqrdmlahq): Likewise.
++ (__arm_vmlasq): Likewise.
++ (__arm_vqdmlahq): Likewise.
++ (__arm_vmaxnmavq_p): Likewise.
++ (__arm_vmaxnmvq_p): Likewise.
++ (__arm_vminnmavq_p): Likewise.
++ (__arm_vminnmvq_p): Likewise.
++ (__arm_vfmasq_m): Likewise.
++ (__arm_vsetq_lane): Likewise.
++ (__arm_vcmpneq_m): Likewise.
++ (__arm_vhaddq_x): Likewise.
++ (__arm_vhsubq_x): Likewise.
++ (__arm_vqrdmlashq_m): Likewise.
++ (__arm_vqdmlashq_m): Likewise.
++ (__arm_vmlaldavaxq_p): Likewise.
++ (__arm_vmlasq_m): Likewise.
++ (__arm_vqdmulhq_m): Likewise.
++ (__arm_vqdmulltq_m): Likewise.
++ (__arm_viwdupq_m): Likewise.
++ (__arm_viwdupq_u16): Likewise.
++ (__arm_viwdupq_u32): Likewise.
++ (__arm_viwdupq_u8): Likewise.
++ (__arm_vdwdupq_m): Likewise.
++ (__arm_vdwdupq_u16): Likewise.
++ (__arm_vdwdupq_u32): Likewise.
++ (__arm_vdwdupq_u8): Likewise.
++ (__arm_vaddlvaq): Likewise.
++ (__arm_vaddlvaq_p): Likewise.
++ (__arm_vaddvaq): Likewise.
++ (__arm_vaddvaq_p): Likewise.
++ (__arm_vcmphiq_m): Likewise.
++ (__arm_vmladavaq_p): Likewise.
++ (__arm_vmladavaxq): Likewise.
++ (__arm_vmlaldavaxq): Likewise.
++ (__arm_vrmlaldavhaq_p): Likewise.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2022-11-28 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/96795
++ * config/arm/arm_mve.h (__arm_vaddq_m_n_s8): Change types.
++ (__arm_vaddq_m_n_s32): Likewise.
++ (__arm_vaddq_m_n_s16): Likewise.
++ (__arm_vaddq_m_n_u8): Likewise.
++ (__arm_vaddq_m_n_u32): Likewise.
++ (__arm_vaddq_m_n_u16): Likewise.
++ (__arm_vaddq_m): Fix Overloading.
++ (__ARM_mve_coerce3): New.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vabsq_f<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (@mve_vcmp<mve_cmp_op>q_<mode>): Fix
++ spacing.
++ * config/arm/arm_mve.h (__arm_vcmpgtq_m, __arm_vcmpleq_m)
++ (__arm_vcmpltq_m, __arm_vcmpneq_m): Add missing defines.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vdupq_n_f<mode>)
++ (mve_vdupq_n_<supf><mode>, mve_vdupq_m_n_<supf><mode>)
++ (mve_vdupq_m_n_f<mode>): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vdwdupq_m_wb_u<mode>_insn): Fix spacing.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/mve.md (mve_vddupq_u<mode>_insn): Fix 'vddup.u'
++ spacing.
++ (mve_vddupq_m_wb_u<mode>_insn): Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * config/arm/vfp.md (*thumb2_movhi_vfp, *thumb2_movhi_fp16): Fix
++ 'vmsr' spacing and reg capitalization.
++
++2023-05-15 liuhongt <hongtao.liu@intel.com>
++
++ * config/i386/cygwin.h (ENDFILE_SPEC): Link crtfastmath.o
++ whenever -mdaz-ftz is specified. Don't link crtfastmath.o
++ when -mno-daz-ftz is specified.
++ * config/i386/darwin.h (ENDFILE_SPEC): Ditto.
++ * config/i386/gnu-user-common.h
++ (GNU_USER_TARGET_MATHFILE_SPEC): Ditto.
++ * config/i386/mingw32.h (ENDFILE_SPEC): Ditto.
++ * config/i386/i386.opt (mdaz-ftz): New option.
++ * doc/invoke.texi (x86 options): Document mdaz-ftz.
++
++2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/109778
++ * wide-int.h (wi::lrotate, wi::rrotate): Call wi::lrshift on
++ wi::zext (x, width) rather than x if width != precision, rather
++ than using wi::zext (right, width) after the shift.
++ * tree-ssa-ccp.cc (bit_value_binop): Call wi::ext on the results
++ of wi::lrotate or wi::rrotate.
++
++2023-05-09 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-04-26 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/108758
++ * config/rs6000/rs6000-builtins.def
++ (__builtin_vsx_scalar_cmp_exp_qp_eq, __builtin_vsx_scalar_cmp_exp_qp_gt
++ __builtin_vsx_scalar_cmp_exp_qp_lt,
++ __builtin_vsx_scalar_cmp_exp_qp_unordered): Move from stanza ieee128-hw
++ to power9-vector.
++
++2023-05-09 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-04-26 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/109069
++ * config/rs6000/altivec.md (sldoi_to_mov<mode>): Replace predicate
++ easy_vector_constant with const_vector_each_byte_same, add
++ handlings in preparation for !easy_vector_constant, and update
++ VECTOR_UNIT_ALTIVEC_OR_VSX_P with VECTOR_MEM_ALTIVEC_OR_VSX_P.
++ * config/rs6000/predicates.md (const_vector_each_byte_same): New
++ predicate.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/DATESTAMP
++++ b/src/gcc/DATESTAMP
+@@ -1 +1 @@
+-20230508
++20231008
+--- a/src/gcc/c-family/ChangeLog
++++ b/src/gcc/c-family/ChangeLog
+@@ -1,3 +1,22 @@
++2023-05-19 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-05-15 Patrick Palka <ppalka@redhat.com>
++
++ * c-cppbuiltin.cc (c_cpp_builtins): Predefine __cpp_auto_cast
++ for C++23.
++
++2023-05-09 Martin Uecker <uecker@tugraz.at>
++
++ Backported from master:
++ 2023-02-18 Martin Uecker <uecker@tugraz.at>
++
++ PR c/105660
++ * c-attribs.cc (append_access_attr): Use the order of arguments when
++ constructing the string.
++ (append_access_attr_idxs): Rename and make static.
++ * c-warn.cc (warn_parm_array_mismatch): Add assertion.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/c-family/c-attribs.cc
++++ b/src/gcc/c-family/c-attribs.cc
+@@ -4624,22 +4624,27 @@ append_access_attr (tree node[3], tree attrs, const char *attrstr,
+ rdwr_map cur_idxs;
+ init_attr_rdwr_indices (&cur_idxs, attrs);
+
++ tree args = TYPE_ARG_TYPES (node[0]);
++ int argpos = 0;
+ std::string spec;
+- for (auto it = new_idxs.begin (); it != new_idxs.end (); ++it)
++ for (tree arg = args; arg; arg = TREE_CHAIN (arg), argpos++)
+ {
+- const auto &newaxsref = *it;
++ const attr_access* const newa = new_idxs.get (argpos);
++
++ if (!newa)
++ continue;
+
+ /* The map has two equal entries for each pointer argument that
+ has an associated size argument. Process just the entry for
+ the former. */
+- if ((unsigned)newaxsref.first != newaxsref.second.ptrarg)
++ if ((unsigned)argpos != newa->ptrarg)
+ continue;
+
+- const attr_access* const cura = cur_idxs.get (newaxsref.first);
++ const attr_access* const cura = cur_idxs.get (argpos);
+ if (!cura)
+ {
+ /* The new attribute needs to be added. */
+- tree str = newaxsref.second.to_internal_string ();
++ tree str = newa->to_internal_string ();
+ spec += TREE_STRING_POINTER (str);
+ continue;
+ }
+@@ -4647,7 +4652,6 @@ append_access_attr (tree node[3], tree attrs, const char *attrstr,
+ /* The new access spec refers to an array/pointer argument for
+ which an access spec already exists. Check and diagnose any
+ conflicts. If no conflicts are found, merge the two. */
+- const attr_access* const newa = &newaxsref.second;
+
+ if (!attrstr)
+ {
+@@ -4782,7 +4786,7 @@ append_access_attr (tree node[3], tree attrs, const char *attrstr,
+ continue;
+
+ /* Merge the CURA and NEWA. */
+- attr_access merged = newaxsref.second;
++ attr_access merged = *newa;
+
+ /* VLA seen in a declaration takes precedence. */
+ if (cura->minsize == HOST_WIDE_INT_M1U)
+@@ -4808,9 +4812,9 @@ append_access_attr (tree node[3], tree attrs, const char *attrstr,
+
+ /* Convenience wrapper for the above. */
+
+-tree
+-append_access_attr (tree node[3], tree attrs, const char *attrstr,
+- char code, HOST_WIDE_INT idxs[2])
++static tree
++append_access_attr_idxs (tree node[3], tree attrs, const char *attrstr,
++ char code, HOST_WIDE_INT idxs[2])
+ {
+ char attrspec[80];
+ int n = sprintf (attrspec, "%c%u", code, (unsigned) idxs[0] - 1);
+@@ -5101,7 +5105,7 @@ handle_access_attribute (tree node[3], tree name, tree args, int flags,
+ attributes specified on previous declarations of the same type
+ and if not, concatenate the two. */
+ const char code = attr_access::mode_chars[mode];
+- tree new_attrs = append_access_attr (node, attrs, attrstr, code, idxs);
++ tree new_attrs = append_access_attr_idxs (node, attrs, attrstr, code, idxs);
+ if (!new_attrs)
+ return NULL_TREE;
+
+@@ -5114,7 +5118,7 @@ handle_access_attribute (tree node[3], tree name, tree args, int flags,
+ {
+ /* Repeat for the previously declared type. */
+ attrs = TYPE_ATTRIBUTES (TREE_TYPE (node[1]));
+- new_attrs = append_access_attr (node, attrs, attrstr, code, idxs);
++ new_attrs = append_access_attr_idxs (node, attrs, attrstr, code, idxs);
+ if (!new_attrs)
+ return NULL_TREE;
+
+--- a/src/gcc/c-family/c-cppbuiltin.cc
++++ b/src/gcc/c-family/c-cppbuiltin.cc
+@@ -1080,6 +1080,7 @@ c_cpp_builtins (cpp_reader *pfile)
+ cpp_define (pfile, "__cpp_if_consteval=202106L");
+ cpp_define (pfile, "__cpp_constexpr=202110L");
+ cpp_define (pfile, "__cpp_multidimensional_subscript=202110L");
++ cpp_define (pfile, "__cpp_auto_cast=202110L");
+ }
+ if (flag_concepts)
+ {
+--- a/src/gcc/c-family/c-warn.cc
++++ b/src/gcc/c-family/c-warn.cc
+@@ -3628,6 +3628,8 @@ warn_parm_array_mismatch (location_t origloc, tree fndecl, tree newparms)
+ for (tree newvbl = newa->size, curvbl = cura->size; newvbl;
+ newvbl = TREE_CHAIN (newvbl), curvbl = TREE_CHAIN (curvbl))
+ {
++ gcc_assert (curvbl);
++
+ tree newpos = TREE_PURPOSE (newvbl);
+ tree curpos = TREE_PURPOSE (curvbl);
+
+--- a/src/gcc/common/config/i386/cpuinfo.h
++++ b/src/gcc/common/config/i386/cpuinfo.h
+@@ -435,7 +435,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+ cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE;
+ break;
+ case 0xa7:
+- case 0xa8:
+ /* Rocket Lake. */
+ cpu = "rocketlake";
+ CHECK___builtin_cpu_is ("corei7");
+@@ -508,7 +507,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+ break;
+ case 0x97:
+ case 0x9a:
+- case 0xbf:
+ /* Alder Lake. */
+ cpu = "alderlake";
+ CHECK___builtin_cpu_is ("corei7");
+@@ -649,7 +647,9 @@ get_available_features (struct __processor_model *cpu_model,
+ /* Get Advanced Features at level 7 (eax = 7, ecx = 0/1). */
+ if (max_cpuid_level >= 7)
+ {
+- __cpuid_count (7, 0, eax, ebx, ecx, edx);
++ unsigned int max_subleaf_level;
++
++ __cpuid_count (7, 0, max_subleaf_level, ebx, ecx, edx);
+ if (ebx & bit_BMI)
+ set_feature (FEATURE_BMI);
+ if (ebx & bit_SGX)
+@@ -761,18 +761,21 @@ get_available_features (struct __processor_model *cpu_model,
+ set_feature (FEATURE_AVX512FP16);
+ }
+
+- __cpuid_count (7, 1, eax, ebx, ecx, edx);
+- if (eax & bit_HRESET)
+- set_feature (FEATURE_HRESET);
+- if (avx_usable)
+- {
+- if (eax & bit_AVXVNNI)
+- set_feature (FEATURE_AVXVNNI);
+- }
+- if (avx512_usable)
++ if (max_subleaf_level >= 1)
+ {
+- if (eax & bit_AVX512BF16)
+- set_feature (FEATURE_AVX512BF16);
++ __cpuid_count (7, 1, eax, ebx, ecx, edx);
++ if (eax & bit_HRESET)
++ set_feature (FEATURE_HRESET);
++ if (avx_usable)
++ {
++ if (eax & bit_AVXVNNI)
++ set_feature (FEATURE_AVXVNNI);
++ }
++ if (avx512_usable)
++ {
++ if (eax & bit_AVX512BF16)
++ set_feature (FEATURE_AVX512BF16);
++ }
+ }
+ }
+
+--- a/src/gcc/config/aarch64/aarch64-builtins.cc
++++ b/src/gcc/config/aarch64/aarch64-builtins.cc
+@@ -751,6 +751,16 @@ aarch64_general_add_builtin (const char *name, tree type, unsigned int code,
+ NULL, attrs);
+ }
+
++static tree
++aarch64_general_simulate_builtin (const char *name, tree fntype,
++ unsigned int code,
++ tree attrs = NULL_TREE)
++{
++ code = (code << AARCH64_BUILTIN_SHIFT) | AARCH64_BUILTIN_GENERAL;
++ return simulate_builtin_function_decl (input_location, name, fntype,
++ code, NULL, attrs);
++}
++
+ static const char *
+ aarch64_mangle_builtin_scalar_type (const_tree type)
+ {
+@@ -1634,11 +1644,11 @@ aarch64_init_ls64_builtins_types (void)
+ gcc_assert (TYPE_ALIGN (array_type) == 64);
+
+ tree field = build_decl (input_location, FIELD_DECL,
+- get_identifier ("val"), array_type);
++ get_identifier ("val"), array_type);
+
+ ls64_arm_data_t = lang_hooks.types.simulate_record_decl (input_location,
+- tuple_type_name,
+- make_array_slice (&field, 1));
++ tuple_type_name,
++ make_array_slice (&field, 1));
+
+ gcc_assert (TYPE_MODE (ls64_arm_data_t) == V8DImode);
+ gcc_assert (TYPE_MODE_RAW (ls64_arm_data_t) == TYPE_MODE (ls64_arm_data_t));
+@@ -1651,23 +1661,24 @@ aarch64_init_ls64_builtins (void)
+ aarch64_init_ls64_builtins_types ();
+
+ ls64_builtins_data data[4] = {
+- {"__builtin_aarch64_ld64b", AARCH64_LS64_BUILTIN_LD64B,
++ {"__arm_ld64b", AARCH64_LS64_BUILTIN_LD64B,
+ build_function_type_list (ls64_arm_data_t,
+- const_ptr_type_node, NULL_TREE)},
+- {"__builtin_aarch64_st64b", AARCH64_LS64_BUILTIN_ST64B,
++ const_ptr_type_node, NULL_TREE)},
++ {"__arm_st64b", AARCH64_LS64_BUILTIN_ST64B,
+ build_function_type_list (void_type_node, ptr_type_node,
+- ls64_arm_data_t, NULL_TREE)},
+- {"__builtin_aarch64_st64bv", AARCH64_LS64_BUILTIN_ST64BV,
++ ls64_arm_data_t, NULL_TREE)},
++ {"__arm_st64bv", AARCH64_LS64_BUILTIN_ST64BV,
+ build_function_type_list (uint64_type_node, ptr_type_node,
+- ls64_arm_data_t, NULL_TREE)},
+- {"__builtin_aarch64_st64bv0", AARCH64_LS64_BUILTIN_ST64BV0,
++ ls64_arm_data_t, NULL_TREE)},
++ {"__arm_st64bv0", AARCH64_LS64_BUILTIN_ST64BV0,
+ build_function_type_list (uint64_type_node, ptr_type_node,
+- ls64_arm_data_t, NULL_TREE)},
++ ls64_arm_data_t, NULL_TREE)},
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE (data); ++i)
+ aarch64_builtin_decls[data[i].code]
+- = aarch64_general_add_builtin (data[i].name, data[i].type, data[i].code);
++ = aarch64_general_simulate_builtin (data[i].name, data[i].type,
++ data[i].code);
+ }
+
+ static void
+@@ -1800,6 +1811,9 @@ aarch64_general_init_builtins (void)
+
+ if (TARGET_MEMTAG)
+ aarch64_init_memtag_builtins ();
++
++ if (in_lto_p)
++ handle_arm_acle_h ();
+ }
+
+ /* Implement TARGET_BUILTIN_DECL for the AARCH64_BUILTIN_GENERAL group. */
+@@ -2281,40 +2295,40 @@ aarch64_expand_builtin_ls64 (int fcode, tree exp, rtx target)
+ {
+ case AARCH64_LS64_BUILTIN_LD64B:
+ {
+- rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
+- create_output_operand (&ops[0], target, V8DImode);
+- create_input_operand (&ops[1], op0, DImode);
+- expand_insn (CODE_FOR_ld64b, 2, ops);
+- return ops[0].value;
++ rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
++ create_output_operand (&ops[0], target, V8DImode);
++ create_input_operand (&ops[1], op0, DImode);
++ expand_insn (CODE_FOR_ld64b, 2, ops);
++ return ops[0].value;
+ }
+ case AARCH64_LS64_BUILTIN_ST64B:
+ {
+- rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
+- rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
+- create_output_operand (&ops[0], op0, DImode);
+- create_input_operand (&ops[1], op1, V8DImode);
+- expand_insn (CODE_FOR_st64b, 2, ops);
+- return const0_rtx;
++ rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
++ rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
++ create_input_operand (&ops[0], op0, DImode);
++ create_input_operand (&ops[1], op1, V8DImode);
++ expand_insn (CODE_FOR_st64b, 2, ops);
++ return const0_rtx;
+ }
+ case AARCH64_LS64_BUILTIN_ST64BV:
+ {
+- rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
+- rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
+- create_output_operand (&ops[0], target, DImode);
+- create_input_operand (&ops[1], op0, DImode);
+- create_input_operand (&ops[2], op1, V8DImode);
+- expand_insn (CODE_FOR_st64bv, 3, ops);
+- return ops[0].value;
++ rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
++ rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
++ create_output_operand (&ops[0], target, DImode);
++ create_input_operand (&ops[1], op0, DImode);
++ create_input_operand (&ops[2], op1, V8DImode);
++ expand_insn (CODE_FOR_st64bv, 3, ops);
++ return ops[0].value;
+ }
+ case AARCH64_LS64_BUILTIN_ST64BV0:
+ {
+- rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
+- rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
+- create_output_operand (&ops[0], target, DImode);
+- create_input_operand (&ops[1], op0, DImode);
+- create_input_operand (&ops[2], op1, V8DImode);
+- expand_insn (CODE_FOR_st64bv0, 3, ops);
+- return ops[0].value;
++ rtx op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
++ rtx op1 = expand_normal (CALL_EXPR_ARG (exp, 1));
++ create_output_operand (&ops[0], target, DImode);
++ create_input_operand (&ops[1], op0, DImode);
++ create_input_operand (&ops[2], op1, V8DImode);
++ expand_insn (CODE_FOR_st64bv0, 3, ops);
++ return ops[0].value;
+ }
+ }
+
+--- a/src/gcc/config/aarch64/aarch64-protos.h
++++ b/src/gcc/config/aarch64/aarch64-protos.h
+@@ -781,6 +781,7 @@ bool aarch64_emit_approx_div (rtx, rtx, rtx);
+ bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
+ tree aarch64_vector_load_decl (tree);
+ void aarch64_expand_call (rtx, rtx, rtx, bool);
++bool aarch64_expand_cpymem_mops (rtx *, bool);
+ bool aarch64_expand_cpymem (rtx *);
+ bool aarch64_expand_setmem (rtx *);
+ bool aarch64_float_const_zero_rtx_p (rtx);
+--- a/src/gcc/config/aarch64/aarch64.cc
++++ b/src/gcc/config/aarch64/aarch64.cc
+@@ -8133,18 +8133,32 @@ aarch64_needs_frame_chain (void)
+ return aarch64_use_frame_pointer;
+ }
+
++/* Return true if the current function should save registers above
++ the locals area, rather than below it. */
++
++static bool
++aarch64_save_regs_above_locals_p ()
++{
++ /* When using stack smash protection, make sure that the canary slot
++ comes between the locals and the saved registers. Otherwise,
++ it would be possible for a carefully sized smash attack to change
++ the saved registers (particularly LR and FP) without reaching the
++ canary. */
++ return crtl->stack_protect_guard;
++}
++
+ /* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+ static void
+ aarch64_layout_frame (void)
+ {
+- poly_int64 offset = 0;
+ int regno, last_fp_reg = INVALID_REGNUM;
+ machine_mode vector_save_mode = aarch64_reg_save_mode (V8_REGNUM);
+ poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
+ bool frame_related_fp_reg_p = false;
+ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 top_of_locals = -1;
+
+ frame.emit_frame_chain = aarch64_needs_frame_chain ();
+
+@@ -8211,11 +8225,18 @@ aarch64_layout_frame (void)
+ && !crtl->abi->clobbers_full_reg_p (regno))
+ frame.reg_offset[regno] = SLOT_REQUIRED;
+
+- /* With stack-clash, LR must be saved in non-leaf functions. The saving of
+- LR counts as an implicit probe which allows us to maintain the invariant
+- described in the comment at expand_prologue. */
+- gcc_assert (crtl->is_leaf
+- || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
++ bool regs_at_top_p = aarch64_save_regs_above_locals_p ();
++
++ poly_int64 offset = crtl->outgoing_args_size;
++ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
++ if (regs_at_top_p)
++ {
++ offset += get_frame_size ();
++ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
++ top_of_locals = offset;
++ }
++ frame.bytes_below_saved_regs = offset;
++ frame.sve_save_and_probe = INVALID_REGNUM;
+
+ /* Now assign stack slots for the registers. Start with the predicate
+ registers, since predicate LDR and STR have a relatively small
+@@ -8223,11 +8244,14 @@ aarch64_layout_frame (void)
+ for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.sve_save_and_probe == INVALID_REGNUM)
++ frame.sve_save_and_probe = regno;
+ frame.reg_offset[regno] = offset;
+ offset += BYTES_PER_SVE_PRED;
+ }
+
+- if (maybe_ne (offset, 0))
++ poly_int64 saved_prs_size = offset - frame.bytes_below_saved_regs;
++ if (maybe_ne (saved_prs_size, 0))
+ {
+ /* If we have any vector registers to save above the predicate registers,
+ the offset of the vector register save slots need to be a multiple
+@@ -8245,10 +8269,10 @@ aarch64_layout_frame (void)
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+ else
+ {
+- if (known_le (offset, vector_save_size))
+- offset = vector_save_size;
+- else if (known_le (offset, vector_save_size * 2))
+- offset = vector_save_size * 2;
++ if (known_le (saved_prs_size, vector_save_size))
++ offset = frame.bytes_below_saved_regs + vector_save_size;
++ else if (known_le (saved_prs_size, vector_save_size * 2))
++ offset = frame.bytes_below_saved_regs + vector_save_size * 2;
+ else
+ gcc_unreachable ();
+ }
+@@ -8259,34 +8283,53 @@ aarch64_layout_frame (void)
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.sve_save_and_probe == INVALID_REGNUM)
++ frame.sve_save_and_probe = regno;
+ frame.reg_offset[regno] = offset;
+ offset += vector_save_size;
+ }
+
+ /* OFFSET is now the offset of the hard frame pointer from the bottom
+ of the callee save area. */
+- bool saves_below_hard_fp_p = maybe_ne (offset, 0);
+- frame.below_hard_fp_saved_regs_size = offset;
++ auto below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
++ bool saves_below_hard_fp_p = maybe_ne (below_hard_fp_saved_regs_size, 0);
++ gcc_assert (!saves_below_hard_fp_p
++ || (frame.sve_save_and_probe != INVALID_REGNUM
++ && known_eq (frame.reg_offset[frame.sve_save_and_probe],
++ frame.bytes_below_saved_regs)));
++
++ frame.bytes_below_hard_fp = offset;
++ frame.hard_fp_save_and_probe = INVALID_REGNUM;
++
++ auto allocate_gpr_slot = [&](unsigned int regno)
++ {
++ if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
++ frame.hard_fp_save_and_probe = regno;
++ frame.reg_offset[regno] = offset;
++ if (frame.wb_push_candidate1 == INVALID_REGNUM)
++ frame.wb_push_candidate1 = regno;
++ else if (frame.wb_push_candidate2 == INVALID_REGNUM)
++ frame.wb_push_candidate2 = regno;
++ offset += UNITS_PER_WORD;
++ };
++
+ if (frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+- frame.reg_offset[R29_REGNUM] = offset;
+- frame.wb_push_candidate1 = R29_REGNUM;
+- frame.reg_offset[R30_REGNUM] = offset + UNITS_PER_WORD;
+- frame.wb_push_candidate2 = R30_REGNUM;
+- offset += 2 * UNITS_PER_WORD;
++ allocate_gpr_slot (R29_REGNUM);
++ allocate_gpr_slot (R30_REGNUM);
+ }
++ else if (flag_stack_clash_protection
++ && known_eq (frame.reg_offset[R30_REGNUM], SLOT_REQUIRED))
++ /* Put the LR save slot first, since it makes a good choice of probe
++ for stack clash purposes. The idea is that the link register usually
++ has to be saved before a call anyway, and so we lose little by
++ stopping it from being individually shrink-wrapped. */
++ allocate_gpr_slot (R30_REGNUM);
+
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+- {
+- frame.reg_offset[regno] = offset;
+- if (frame.wb_push_candidate1 == INVALID_REGNUM)
+- frame.wb_push_candidate1 = regno;
+- else if (frame.wb_push_candidate2 == INVALID_REGNUM)
+- frame.wb_push_candidate2 = regno;
+- offset += UNITS_PER_WORD;
+- }
++ allocate_gpr_slot (regno);
+
+ poly_int64 max_int_offset = offset;
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -8295,6 +8338,8 @@ aarch64_layout_frame (void)
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
+ {
++ if (frame.hard_fp_save_and_probe == INVALID_REGNUM)
++ frame.hard_fp_save_and_probe = regno;
+ /* If there is an alignment gap between integer and fp callee-saves,
+ allocate the last fp register to it if possible. */
+ if (regno == last_fp_reg
+@@ -8317,30 +8362,36 @@ aarch64_layout_frame (void)
+
+ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+- frame.saved_regs_size = offset;
+-
+- poly_int64 varargs_and_saved_regs_size = offset + frame.saved_varargs_size;
++ auto saved_regs_size = offset - frame.bytes_below_saved_regs;
++ gcc_assert (known_eq (saved_regs_size, below_hard_fp_saved_regs_size)
++ || (frame.hard_fp_save_and_probe != INVALID_REGNUM
++ && known_eq (frame.reg_offset[frame.hard_fp_save_and_probe],
++ frame.bytes_below_hard_fp)));
+
+- poly_int64 above_outgoing_args
+- = aligned_upper_bound (varargs_and_saved_regs_size
+- + get_frame_size (),
+- STACK_BOUNDARY / BITS_PER_UNIT);
+-
+- frame.hard_fp_offset
+- = above_outgoing_args - frame.below_hard_fp_saved_regs_size;
++ /* With stack-clash, a register must be saved in non-leaf functions.
++ The saving of the bottommost register counts as an implicit probe,
++ which allows us to maintain the invariant described in the comment
++ at expand_prologue. */
++ gcc_assert (crtl->is_leaf || maybe_ne (saved_regs_size, 0));
+
+- /* Both these values are already aligned. */
+- gcc_assert (multiple_p (crtl->outgoing_args_size,
+- STACK_BOUNDARY / BITS_PER_UNIT));
+- frame.frame_size = above_outgoing_args + crtl->outgoing_args_size;
++ if (!regs_at_top_p)
++ {
++ offset += get_frame_size ();
++ offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
++ top_of_locals = offset;
++ }
++ offset += frame.saved_varargs_size;
++ gcc_assert (multiple_p (offset, STACK_BOUNDARY / BITS_PER_UNIT));
++ frame.frame_size = offset;
+
+- frame.locals_offset = frame.saved_varargs_size;
++ frame.bytes_above_hard_fp = frame.frame_size - frame.bytes_below_hard_fp;
++ gcc_assert (known_ge (top_of_locals, 0));
++ frame.bytes_above_locals = frame.frame_size - top_of_locals;
+
+ frame.initial_adjust = 0;
+ frame.final_adjust = 0;
+ frame.callee_adjust = 0;
+ frame.sve_callee_adjust = 0;
+- frame.callee_offset = 0;
+
+ frame.wb_pop_candidate1 = frame.wb_push_candidate1;
+ frame.wb_pop_candidate2 = frame.wb_push_candidate2;
+@@ -8351,7 +8402,7 @@ aarch64_layout_frame (void)
+ frame.is_scs_enabled
+ = (!crtl->calls_eh_return
+ && sanitize_flags_p (SANITIZE_SHADOW_CALL_STACK)
+- && known_ge (cfun->machine->frame.reg_offset[LR_REGNUM], 0));
++ && known_ge (frame.reg_offset[LR_REGNUM], 0));
+
+ /* When shadow call stack is enabled, the scs_pop in the epilogue will
+ restore x30, and we don't need to pop x30 again in the traditional
+@@ -8371,75 +8422,76 @@ aarch64_layout_frame (void)
+ max_push_offset to 0, because no registers are popped at this time,
+ so callee_adjust cannot be adjusted. */
+ HOST_WIDE_INT max_push_offset = 0;
+- if (frame.wb_pop_candidate2 != INVALID_REGNUM)
+- max_push_offset = 512;
+- else if (frame.wb_pop_candidate1 != INVALID_REGNUM)
+- max_push_offset = 256;
++ if (frame.wb_pop_candidate1 != INVALID_REGNUM)
++ {
++ if (frame.wb_pop_candidate2 != INVALID_REGNUM)
++ max_push_offset = 512;
++ else
++ max_push_offset = 256;
++ }
+
+- HOST_WIDE_INT const_size, const_outgoing_args_size, const_fp_offset;
++ HOST_WIDE_INT const_size, const_below_saved_regs, const_above_fp;
+ HOST_WIDE_INT const_saved_regs_size;
+- if (frame.frame_size.is_constant (&const_size)
+- && const_size < max_push_offset
+- && known_eq (frame.hard_fp_offset, const_size))
++ if (known_eq (saved_regs_size, 0))
++ frame.initial_adjust = frame.frame_size;
++ else if (frame.frame_size.is_constant (&const_size)
++ && const_size < max_push_offset
++ && known_eq (frame.bytes_above_hard_fp, const_size))
+ {
+- /* Simple, small frame with no outgoing arguments:
++ /* Simple, small frame with no data below the saved registers.
+
+ stp reg1, reg2, [sp, -frame_size]!
+ stp reg3, reg4, [sp, 16] */
+ frame.callee_adjust = const_size;
+ }
+- else if (crtl->outgoing_args_size.is_constant (&const_outgoing_args_size)
+- && frame.saved_regs_size.is_constant (&const_saved_regs_size)
+- && const_outgoing_args_size + const_saved_regs_size < 512
+- /* We could handle this case even with outgoing args, provided
+- that the number of args left us with valid offsets for all
+- predicate and vector save slots. It's such a rare case that
+- it hardly seems worth the effort though. */
+- && (!saves_below_hard_fp_p || const_outgoing_args_size == 0)
++ else if (frame.bytes_below_saved_regs.is_constant (&const_below_saved_regs)
++ && saved_regs_size.is_constant (&const_saved_regs_size)
++ && const_below_saved_regs + const_saved_regs_size < 512
++ /* We could handle this case even with data below the saved
++ registers, provided that that data left us with valid offsets
++ for all predicate and vector save slots. It's such a rare
++ case that it hardly seems worth the effort though. */
++ && (!saves_below_hard_fp_p || const_below_saved_regs == 0)
+ && !(cfun->calls_alloca
+- && frame.hard_fp_offset.is_constant (&const_fp_offset)
+- && const_fp_offset < max_push_offset))
++ && frame.bytes_above_hard_fp.is_constant (&const_above_fp)
++ && const_above_fp < max_push_offset))
+ {
+- /* Frame with small outgoing arguments:
++ /* Frame with small area below the saved registers:
+
+ sub sp, sp, frame_size
+- stp reg1, reg2, [sp, outgoing_args_size]
+- stp reg3, reg4, [sp, outgoing_args_size + 16] */
++ stp reg1, reg2, [sp, bytes_below_saved_regs]
++ stp reg3, reg4, [sp, bytes_below_saved_regs + 16] */
+ frame.initial_adjust = frame.frame_size;
+- frame.callee_offset = const_outgoing_args_size;
+ }
+ else if (saves_below_hard_fp_p
+- && known_eq (frame.saved_regs_size,
+- frame.below_hard_fp_saved_regs_size))
++ && known_eq (saved_regs_size, below_hard_fp_saved_regs_size))
+ {
+ /* Frame in which all saves are SVE saves:
+
+- sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
++ sub sp, sp, frame_size - bytes_below_saved_regs
+ save SVE registers relative to SP
+- sub sp, sp, outgoing_args_size */
+- frame.initial_adjust = (frame.hard_fp_offset
+- + frame.below_hard_fp_saved_regs_size);
+- frame.final_adjust = crtl->outgoing_args_size;
++ sub sp, sp, bytes_below_saved_regs */
++ frame.initial_adjust = frame.frame_size - frame.bytes_below_saved_regs;
++ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+- else if (frame.hard_fp_offset.is_constant (&const_fp_offset)
+- && const_fp_offset < max_push_offset)
++ else if (frame.bytes_above_hard_fp.is_constant (&const_above_fp)
++ && const_above_fp < max_push_offset)
+ {
+- /* Frame with large outgoing arguments or SVE saves, but with
+- a small local area:
++ /* Frame with large area below the saved registers, or with SVE saves,
++ but with a small area above:
+
+ stp reg1, reg2, [sp, -hard_fp_offset]!
+ stp reg3, reg4, [sp, 16]
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+- sub sp, sp, outgoing_args_size */
+- frame.callee_adjust = const_fp_offset;
+- frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+- frame.final_adjust = crtl->outgoing_args_size;
++ sub sp, sp, bytes_below_saved_regs */
++ frame.callee_adjust = const_above_fp;
++ frame.sve_callee_adjust = below_hard_fp_saved_regs_size;
++ frame.final_adjust = frame.bytes_below_saved_regs;
+ }
+ else
+ {
+- /* Frame with large local area and outgoing arguments or SVE saves,
+- using frame pointer:
++ /* General case:
+
+ sub sp, sp, hard_fp_offset
+ stp x29, x30, [sp, 0]
+@@ -8447,10 +8499,29 @@ aarch64_layout_frame (void)
+ stp reg3, reg4, [sp, 16]
+ [sub sp, sp, below_hard_fp_saved_regs_size]
+ [save SVE registers relative to SP]
+- sub sp, sp, outgoing_args_size */
+- frame.initial_adjust = frame.hard_fp_offset;
+- frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
+- frame.final_adjust = crtl->outgoing_args_size;
++ sub sp, sp, bytes_below_saved_regs */
++ frame.initial_adjust = frame.bytes_above_hard_fp;
++ frame.sve_callee_adjust = below_hard_fp_saved_regs_size;
++ frame.final_adjust = frame.bytes_below_saved_regs;
++ }
++
++ /* The frame is allocated in pieces, with each non-final piece
++ including a register save at offset 0 that acts as a probe for
++ the following piece. In addition, the save of the bottommost register
++ acts as a probe for callees and allocas. Roll back any probes that
++ aren't needed.
++
++ A probe isn't needed if it is associated with the final allocation
++ (including callees and allocas) that happens before the epilogue is
++ executed. */
++ if (crtl->is_leaf
++ && !cfun->calls_alloca
++ && known_eq (frame.final_adjust, 0))
++ {
++ if (maybe_ne (frame.sve_callee_adjust, 0))
++ frame.sve_save_and_probe = INVALID_REGNUM;
++ else
++ frame.hard_fp_save_and_probe = INVALID_REGNUM;
+ }
+
+ /* Make sure the individual adjustments add up to the full frame size. */
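As an aside (editorial illustration, not part of the patch): the relationship between the renamed byte-offset fields and the chosen adjustments can be checked with ordinary integers. The sketch below uses plain values in place of poly_int64, ignores SVE saves, and its names merely echo the patch's aarch64_frame fields; it only demonstrates the invariant asserted just above.

      // Editorial sketch: plain integers stand in for poly_int64; SVE saves
      // are ignored and all numbers are assumed for illustration.
      #include <cassert>

      int main ()
      {
        long long bytes_below_saved_regs = 64;  // outgoing arguments
        long long saved_regs_size = 96;         // GP/FP save slots
        long long locals_and_padding = 32;
        long long saved_varargs_size = 0;

        long long bytes_below_hard_fp = bytes_below_saved_regs;  // no SVE saves
        long long frame_size = bytes_below_saved_regs + saved_regs_size
                               + locals_and_padding + saved_varargs_size;
        long long bytes_above_hard_fp = frame_size - bytes_below_hard_fp;

        // "General case" split: drop SP to the hard frame pointer, store the
        // saved registers, then drop the rest of the way to the outgoing args.
        long long initial_adjust = bytes_above_hard_fp;
        long long callee_adjust = 0;
        long long sve_callee_adjust = 0;
        long long final_adjust = bytes_below_saved_regs;

        // The pieces must add up to the full frame size.
        assert (initial_adjust + callee_adjust + sve_callee_adjust
                + final_adjust == frame_size);
        return 0;
      }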
+@@ -8754,15 +8825,17 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
+ }
+
+ /* Emit code to save the callee-saved registers from register number START
+- to LIMIT to the stack at the location starting at offset START_OFFSET,
+- skipping any write-back candidates if SKIP_WB is true. HARD_FP_VALID_P
+- is true if the hard frame pointer has been set up. */
++ to LIMIT to the stack. The stack pointer is currently BYTES_BELOW_SP
++ bytes above the bottom of the static frame. Skip any write-back
++ candidates if SKIP_WB is true. HARD_FP_VALID_P is true if the hard
++ frame pointer has been set up. */
+
+ static void
+-aarch64_save_callee_saves (poly_int64 start_offset,
++aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+ unsigned start, unsigned limit, bool skip_wb,
+ bool hard_fp_valid_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ rtx_insn *insn;
+ unsigned regno;
+ unsigned regno2;
+@@ -8777,8 +8850,8 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
+
+ if (skip_wb
+- && (regno == cfun->machine->frame.wb_push_candidate1
+- || regno == cfun->machine->frame.wb_push_candidate2))
++ && (regno == frame.wb_push_candidate1
++ || regno == frame.wb_push_candidate2))
+ continue;
+
+ if (cfun->machine->reg_is_wrapped_separately[regno])
+@@ -8786,7 +8859,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + cfun->machine->frame.reg_offset[regno];
++ offset = frame.reg_offset[regno] - bytes_below_sp;
+ rtx base_rtx = stack_pointer_rtx;
+ poly_int64 sp_offset = offset;
+
+@@ -8797,9 +8870,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ else if (GP_REGNUM_P (regno)
+ && (!offset.is_constant (&const_offset) || const_offset >= 512))
+ {
+- gcc_assert (known_eq (start_offset, 0));
+- poly_int64 fp_offset
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
++ poly_int64 fp_offset = frame.bytes_below_hard_fp - bytes_below_sp;
+ if (hard_fp_valid_p)
+ base_rtx = hard_frame_pointer_rtx;
+ else
+@@ -8821,8 +8892,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
+ && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && known_eq (GET_MODE_SIZE (mode),
+- cfun->machine->frame.reg_offset[regno2]
+- - cfun->machine->frame.reg_offset[regno]))
++ frame.reg_offset[regno2] - frame.reg_offset[regno]))
+ {
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ rtx mem2;
+@@ -8864,14 +8934,16 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+ }
+
+ /* Emit code to restore the callee registers from register number START
+- up to and including LIMIT. Restore from the stack offset START_OFFSET,
+- skipping any write-back candidates if SKIP_WB is true. Write the
+- appropriate REG_CFA_RESTORE notes into CFI_OPS. */
++ up to and including LIMIT. The stack pointer is currently BYTES_BELOW_SP
++ bytes above the bottom of the static frame. Skip any write-back
++ candidates if SKIP_WB is true. Write the appropriate REG_CFA_RESTORE
++ notes into CFI_OPS. */
+
+ static void
+-aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
++aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
+ unsigned limit, bool skip_wb, rtx *cfi_ops)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ unsigned regno;
+ unsigned regno2;
+ poly_int64 offset;
+@@ -8888,13 +8960,13 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+ rtx reg, mem;
+
+ if (skip_wb
+- && (regno == cfun->machine->frame.wb_pop_candidate1
+- || regno == cfun->machine->frame.wb_pop_candidate2))
++ && (regno == frame.wb_pop_candidate1
++ || regno == frame.wb_pop_candidate2))
+ continue;
+
+ machine_mode mode = aarch64_reg_save_mode (regno);
+ reg = gen_rtx_REG (mode, regno);
+- offset = start_offset + cfun->machine->frame.reg_offset[regno];
++ offset = frame.reg_offset[regno] - bytes_below_sp;
+ rtx base_rtx = stack_pointer_rtx;
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
+@@ -8905,8 +8977,7 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+ && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
+ && !cfun->machine->reg_is_wrapped_separately[regno2]
+ && known_eq (GET_MODE_SIZE (mode),
+- cfun->machine->frame.reg_offset[regno2]
+- - cfun->machine->frame.reg_offset[regno]))
++ frame.reg_offset[regno2] - frame.reg_offset[regno]))
+ {
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ rtx mem2;
+@@ -9011,6 +9082,7 @@ offset_12bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
+ static sbitmap
+ aarch64_get_separate_components (void)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ sbitmap components = sbitmap_alloc (LAST_SAVED_REGNUM + 1);
+ bitmap_clear (components);
+
+@@ -9027,20 +9099,11 @@ aarch64_get_separate_components (void)
+ if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
+ continue;
+
+- poly_int64 offset = cfun->machine->frame.reg_offset[regno];
+-
+- /* If the register is saved in the first SVE save slot, we use
+- it as a stack probe for -fstack-clash-protection. */
+- if (flag_stack_clash_protection
+- && maybe_ne (cfun->machine->frame.below_hard_fp_saved_regs_size, 0)
+- && known_eq (offset, 0))
+- continue;
++ poly_int64 offset = frame.reg_offset[regno];
+
+ /* Get the offset relative to the register we'll use. */
+ if (frame_pointer_needed)
+- offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
+- else
+- offset += crtl->outgoing_args_size;
++ offset -= frame.bytes_below_hard_fp;
+
+ /* Check that we can access the stack slot of the register with one
+ direct load with no adjustments needed. */
+@@ -9057,11 +9120,11 @@ aarch64_get_separate_components (void)
+ /* If the spare predicate register used by big-endian SVE code
+ is call-preserved, it must be saved in the main prologue
+ before any saves that use it. */
+- if (cfun->machine->frame.spare_pred_reg != INVALID_REGNUM)
+- bitmap_clear_bit (components, cfun->machine->frame.spare_pred_reg);
++ if (frame.spare_pred_reg != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.spare_pred_reg);
+
+- unsigned reg1 = cfun->machine->frame.wb_push_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_push_candidate2;
++ unsigned reg1 = frame.wb_push_candidate1;
++ unsigned reg2 = frame.wb_push_candidate2;
+ /* If registers have been chosen to be stored/restored with
+ writeback don't interfere with them to avoid having to output explicit
+ stack adjustment instructions. */
+@@ -9072,6 +9135,13 @@ aarch64_get_separate_components (void)
+
+ bitmap_clear_bit (components, LR_REGNUM);
+ bitmap_clear_bit (components, SP_REGNUM);
++ if (flag_stack_clash_protection)
++ {
++ if (frame.sve_save_and_probe != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.sve_save_and_probe);
++ if (frame.hard_fp_save_and_probe != INVALID_REGNUM)
++ bitmap_clear_bit (components, frame.hard_fp_save_and_probe);
++ }
+
+ return components;
+ }
+@@ -9170,6 +9240,7 @@ aarch64_get_next_set_bit (sbitmap bmp, unsigned int start)
+ static void
+ aarch64_process_components (sbitmap components, bool prologue_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM);
+@@ -9184,11 +9255,9 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ machine_mode mode = aarch64_reg_save_mode (regno);
+
+ rtx reg = gen_rtx_REG (mode, regno);
+- poly_int64 offset = cfun->machine->frame.reg_offset[regno];
++ poly_int64 offset = frame.reg_offset[regno];
+ if (frame_pointer_needed)
+- offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
+- else
+- offset += crtl->outgoing_args_size;
++ offset -= frame.bytes_below_hard_fp;
+
+ rtx addr = plus_constant (Pmode, ptr_reg, offset);
+ rtx mem = gen_frame_mem (mode, addr);
+@@ -9211,14 +9280,14 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ break;
+ }
+
+- poly_int64 offset2 = cfun->machine->frame.reg_offset[regno2];
++ poly_int64 offset2 = frame.reg_offset[regno2];
+ /* The next register is not of the same class or its offset is not
+ mergeable with the current one into a pair. */
+ if (aarch64_sve_mode_p (mode)
+ || !satisfies_constraint_Ump (mem)
+ || GP_REGNUM_P (regno) != GP_REGNUM_P (regno2)
+ || (crtl->abi->id () == ARM_PCS_SIMD && FP_REGNUM_P (regno))
+- || maybe_ne ((offset2 - cfun->machine->frame.reg_offset[regno]),
++ || maybe_ne ((offset2 - frame.reg_offset[regno]),
+ GET_MODE_SIZE (mode)))
+ {
+ insn = emit_insn (set);
+@@ -9240,9 +9309,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ /* REGNO2 can be saved/restored in a pair with REGNO. */
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ if (frame_pointer_needed)
+- offset2 -= cfun->machine->frame.below_hard_fp_saved_regs_size;
+- else
+- offset2 += crtl->outgoing_args_size;
++ offset2 -= frame.bytes_below_hard_fp;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+ rtx mem2 = gen_frame_mem (mode, addr2);
+ rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
+@@ -9316,10 +9383,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+ registers. If POLY_SIZE is not large enough to require a probe this function
+ will only adjust the stack. When allocating the stack space
+ FRAME_RELATED_P is then used to indicate if the allocation is frame related.
+- FINAL_ADJUSTMENT_P indicates whether we are allocating the outgoing
+- arguments. If we are then we ensure that any allocation larger than the ABI
+- defined buffer needs a probe so that the invariant of having a 1KB buffer is
+- maintained.
++ FINAL_ADJUSTMENT_P indicates whether we are allocating the area below
++ the saved registers. If we are then we ensure that any allocation
++ larger than the ABI defined buffer needs a probe so that the
++ invariant of having a 1KB buffer is maintained.
+
+ We emit barriers after each stack adjustment to prevent optimizations from
+ breaking the invariant that we never drop the stack more than a page. This
+@@ -9335,45 +9402,26 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ bool frame_related_p,
+ bool final_adjustment_p)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
+ HOST_WIDE_INT guard_size
+ = 1 << param_stack_clash_protection_guard_size;
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
++ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
++ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+ HOST_WIDE_INT min_probe_threshold
+ = (final_adjustment_p
+- ? guard_used_by_caller
++ ? guard_used_by_caller + byte_sp_alignment
+ : guard_size - guard_used_by_caller);
+- /* When doing the final adjustment for the outgoing arguments, take into
+- account any unprobed space there is above the current SP. There are
+- two cases:
+-
+- - When saving SVE registers below the hard frame pointer, we force
+- the lowest save to take place in the prologue before doing the final
+- adjustment (i.e. we don't allow the save to be shrink-wrapped).
+- This acts as a probe at SP, so there is no unprobed space.
+-
+- - When there are no SVE register saves, we use the store of the link
+- register as a probe. We can't assume that LR was saved at position 0
+- though, so treat any space below it as unprobed. */
+- if (final_adjustment_p
+- && known_eq (cfun->machine->frame.below_hard_fp_saved_regs_size, 0))
+- {
+- poly_int64 lr_offset = cfun->machine->frame.reg_offset[LR_REGNUM];
+- if (known_ge (lr_offset, 0))
+- min_probe_threshold -= lr_offset.to_constant ();
+- else
+- gcc_assert (!flag_stack_clash_protection || known_eq (poly_size, 0));
+- }
+-
+- poly_int64 frame_size = cfun->machine->frame.frame_size;
++ poly_int64 frame_size = frame.frame_size;
+
+ /* We should always have a positive probe threshold. */
+ gcc_assert (min_probe_threshold > 0);
+
+ if (flag_stack_clash_protection && !final_adjustment_p)
+ {
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
+
+ if (known_eq (frame_size, 0))
+ {
+@@ -9527,7 +9575,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ /* Handle any residuals. Residuals of at least MIN_PROBE_THRESHOLD have to
+ be probed. This maintains the requirement that each page is probed at
+ least once. For initial probing we probe only if the allocation is
+- more than GUARD_SIZE - buffer, and for the outgoing arguments we probe
++ more than GUARD_SIZE - buffer, and below the saved registers we probe
+ if the amount is larger than buffer. GUARD_SIZE - buffer + buffer ==
+ GUARD_SIZE. This works that for any allocation that is large enough to
+ trigger a probe here, we'll have at least one, and if they're not large
+@@ -9537,16 +9585,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when LR is not at position 0 or when
+- the final adjustment is smaller than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -9557,8 +9601,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
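An aside on the numbers involved (not part of the patch): assuming the usual aarch64 defaults of a 64 KiB guard region, a 1 KiB caller-protected buffer (STACK_CLASH_CALLER_GUARD) and 16-byte stack alignment, the probe thresholds used above work out as in this sketch.

      // Editorial sketch: the guard and buffer sizes are assumed defaults,
      // not values read from the compiler.
      #include <cstdio>

      int main ()
      {
        long long guard_size = 64 * 1024;
        long long guard_used_by_caller = 1024;
        long long byte_sp_alignment = 16;

        // Final adjustment (the area below the saved registers): probe once
        // the allocation exceeds the caller-protected buffer plus one
        // aligned slot.
        long long final_threshold = guard_used_by_caller + byte_sp_alignment;

        // Earlier adjustments: probe only when nearly a whole guard region
        // could otherwise be skipped.
        long long initial_threshold = guard_size - guard_used_by_caller;

        std::printf ("final %lld, initial %lld\n",
                     final_threshold, initial_threshold);  // 1040 and 64512
        return 0;
      }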
+@@ -9596,20 +9640,24 @@ aarch64_epilogue_uses (int regno)
+ | for register varargs |
+ | |
+ +-------------------------------+
+- | local variables | <-- frame_pointer_rtx
++ | local variables (1) | <-- frame_pointer_rtx
+ | |
+ +-------------------------------+
+- | padding | \
+- +-------------------------------+ |
+- | callee-saved registers | | frame.saved_regs_size
+- +-------------------------------+ |
+- | LR' | |
+- +-------------------------------+ |
+- | FP' | |
+- +-------------------------------+ |<- hard_frame_pointer_rtx (aligned)
+- | SVE vector registers | | \
+- +-------------------------------+ | | below_hard_fp_saved_regs_size
+- | SVE predicate registers | / /
++ | padding (1) |
++ +-------------------------------+
++ | callee-saved registers |
++ +-------------------------------+
++ | LR' |
++ +-------------------------------+
++ | FP' |
++ +-------------------------------+ <-- hard_frame_pointer_rtx (aligned)
++ | SVE vector registers |
++ +-------------------------------+
++ | SVE predicate registers |
++ +-------------------------------+
++ | local variables (2) |
++ +-------------------------------+
++ | padding (2) |
+ +-------------------------------+
+ | dynamic allocation |
+ +-------------------------------+
+@@ -9620,6 +9668,9 @@ aarch64_epilogue_uses (int regno)
+ +-------------------------------+
+ | | <-- stack_pointer_rtx (aligned)
+
++ The regions marked (1) and (2) are mutually exclusive. (2) is used
++ when aarch64_save_regs_above_locals_p is true.
++
+ Dynamic stack allocations via alloca() decrease stack_pointer_rtx
+ but leave frame_pointer_rtx and hard_frame_pointer_rtx
+ unchanged.
+@@ -9634,8 +9685,8 @@ aarch64_epilogue_uses (int regno)
+ When probing is needed, we emit a probe at the start of the prologue
+ and every PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE bytes thereafter.
+
+- We have to track how much space has been allocated and the only stores
+- to the stack we track as implicit probes are the FP/LR stores.
++ We can also use register saves as probes. These are stored in
++ sve_save_and_probe and hard_fp_save_and_probe.
+
+ For outgoing arguments we probe if the size is larger than 1KB, such that
+ the ABI specified buffer is maintained for the next callee.
+@@ -9662,17 +9713,15 @@ aarch64_epilogue_uses (int regno)
+ void
+ aarch64_expand_prologue (void)
+ {
+- poly_int64 frame_size = cfun->machine->frame.frame_size;
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
+- poly_int64 callee_offset = cfun->machine->frame.callee_offset;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
+- poly_int64 below_hard_fp_saved_regs_size
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
+- unsigned reg1 = cfun->machine->frame.wb_push_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_push_candidate2;
+- bool emit_frame_chain = cfun->machine->frame.emit_frame_chain;
++ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 frame_size = frame.frame_size;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
++ unsigned reg1 = frame.wb_push_candidate1;
++ unsigned reg2 = frame.wb_push_candidate2;
++ bool emit_frame_chain = frame.emit_frame_chain;
+ rtx_insn *insn;
+
+ if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
+@@ -9703,7 +9752,7 @@ aarch64_expand_prologue (void)
+ }
+
+ /* Push return address to shadow call stack. */
+- if (cfun->machine->frame.is_scs_enabled)
++ if (frame.is_scs_enabled)
+ emit_insn (gen_scs_push ());
+
+ if (flag_stack_usage_info)
+@@ -9740,21 +9789,21 @@ aarch64_expand_prologue (void)
+ if (callee_adjust != 0)
+ aarch64_push_regs (reg1, reg2, callee_adjust);
+
+- /* The offset of the frame chain record (if any) from the current SP. */
+- poly_int64 chain_offset = (initial_adjust + callee_adjust
+- - cfun->machine->frame.hard_fp_offset);
+- gcc_assert (known_ge (chain_offset, 0));
+-
+- /* The offset of the bottom of the save area from the current SP. */
+- poly_int64 saved_regs_offset = chain_offset - below_hard_fp_saved_regs_size;
++ /* The offset of the current SP from the bottom of the static frame. */
++ poly_int64 bytes_below_sp = frame_size - initial_adjust - callee_adjust;
+
+ if (emit_frame_chain)
+ {
++ /* The offset of the frame chain record (if any) from the current SP. */
++ poly_int64 chain_offset = (initial_adjust + callee_adjust
++ - frame.bytes_above_hard_fp);
++ gcc_assert (known_ge (chain_offset, 0));
++
+ if (callee_adjust == 0)
+ {
+ reg1 = R29_REGNUM;
+ reg2 = R30_REGNUM;
+- aarch64_save_callee_saves (saved_regs_offset, reg1, reg2,
++ aarch64_save_callee_saves (bytes_below_sp, reg1, reg2,
+ false, false);
+ }
+ else
+@@ -9779,8 +9828,7 @@ aarch64_expand_prologue (void)
+ implicit. */
+ if (!find_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX))
+ {
+- rtx src = plus_constant (Pmode, stack_pointer_rtx,
+- callee_offset);
++ rtx src = plus_constant (Pmode, stack_pointer_rtx, chain_offset);
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (hard_frame_pointer_rtx, src));
+ }
+@@ -9795,7 +9843,7 @@ aarch64_expand_prologue (void)
+ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+- aarch64_save_callee_saves (saved_regs_offset, R0_REGNUM, R30_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, R0_REGNUM, R30_REGNUM,
+ callee_adjust != 0 || emit_frame_chain,
+ emit_frame_chain);
+ if (maybe_ne (sve_callee_adjust, 0))
+@@ -9805,18 +9853,21 @@ aarch64_expand_prologue (void)
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx,
+ sve_callee_adjust,
+ !frame_pointer_needed, false);
+- saved_regs_offset += sve_callee_adjust;
++ bytes_below_sp -= sve_callee_adjust;
+ }
+- aarch64_save_callee_saves (saved_regs_offset, P0_REGNUM, P15_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, P0_REGNUM, P15_REGNUM,
+ false, emit_frame_chain);
+- aarch64_save_callee_saves (saved_regs_offset, V0_REGNUM, V31_REGNUM,
++ aarch64_save_callee_saves (bytes_below_sp, V0_REGNUM, V31_REGNUM,
+ callee_adjust != 0 || emit_frame_chain,
+ emit_frame_chain);
+
+ /* We may need to probe the final adjustment if it is larger than the guard
+ that is assumed by the called. */
++ gcc_assert (known_eq (bytes_below_sp, final_adjust));
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ !frame_pointer_needed, true);
++ if (emit_frame_chain && maybe_ne (final_adjust, 0))
++ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+ /* Return TRUE if we can use a simple_return insn.
+@@ -9845,16 +9896,15 @@ aarch64_use_return_insn_p (void)
+ void
+ aarch64_expand_epilogue (bool for_sibcall)
+ {
+- poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
+- HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
+- poly_int64 final_adjust = cfun->machine->frame.final_adjust;
+- poly_int64 callee_offset = cfun->machine->frame.callee_offset;
+- poly_int64 sve_callee_adjust = cfun->machine->frame.sve_callee_adjust;
+- poly_int64 below_hard_fp_saved_regs_size
+- = cfun->machine->frame.below_hard_fp_saved_regs_size;
+- unsigned reg1 = cfun->machine->frame.wb_pop_candidate1;
+- unsigned reg2 = cfun->machine->frame.wb_pop_candidate2;
+- unsigned int last_gpr = (cfun->machine->frame.is_scs_enabled
++ aarch64_frame &frame = cfun->machine->frame;
++ poly_int64 initial_adjust = frame.initial_adjust;
++ HOST_WIDE_INT callee_adjust = frame.callee_adjust;
++ poly_int64 final_adjust = frame.final_adjust;
++ poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
++ poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
++ unsigned reg1 = frame.wb_pop_candidate1;
++ unsigned reg2 = frame.wb_pop_candidate2;
++ unsigned int last_gpr = (frame.is_scs_enabled
+ ? R29_REGNUM : R30_REGNUM);
+ rtx cfi_ops = NULL;
+ rtx_insn *insn;
+@@ -9888,7 +9938,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ /* We need to add memory barrier to prevent read from deallocated stack. */
+ bool need_barrier_p
+ = maybe_ne (get_frame_size ()
+- + cfun->machine->frame.saved_varargs_size, 0);
++ + frame.saved_varargs_size, 0);
+
+ /* Emit a barrier to prevent loads from a deallocated stack. */
+ if (maybe_gt (final_adjust, crtl->outgoing_args_size)
+@@ -9909,7 +9959,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ is restored on the instruction doing the writeback. */
+ aarch64_add_offset (Pmode, stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+- -callee_offset - below_hard_fp_saved_regs_size,
++ -bytes_below_hard_fp + final_adjust,
+ tmp1_rtx, tmp0_rtx, callee_adjust == 0);
+ else
+ /* The case where we need to re-use the register here is very rare, so
+@@ -9919,9 +9969,9 @@ aarch64_expand_epilogue (bool for_sibcall)
+
+ /* Restore the vector registers before the predicate registers,
+ so that we can use P4 as a temporary for big-endian SVE frames. */
+- aarch64_restore_callee_saves (callee_offset, V0_REGNUM, V31_REGNUM,
++ aarch64_restore_callee_saves (final_adjust, V0_REGNUM, V31_REGNUM,
+ callee_adjust != 0, &cfi_ops);
+- aarch64_restore_callee_saves (callee_offset, P0_REGNUM, P15_REGNUM,
++ aarch64_restore_callee_saves (final_adjust, P0_REGNUM, P15_REGNUM,
+ false, &cfi_ops);
+ if (maybe_ne (sve_callee_adjust, 0))
+ aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
+@@ -9929,7 +9979,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ /* When shadow call stack is enabled, the scs_pop in the epilogue will
+ restore x30, we don't need to restore x30 again in the traditional
+ way. */
+- aarch64_restore_callee_saves (callee_offset - sve_callee_adjust,
++ aarch64_restore_callee_saves (final_adjust + sve_callee_adjust,
+ R0_REGNUM, last_gpr,
+ callee_adjust != 0, &cfi_ops);
+
+@@ -9969,7 +10019,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+ }
+
+ /* Pop return address from shadow call stack. */
+- if (cfun->machine->frame.is_scs_enabled)
++ if (frame.is_scs_enabled)
+ {
+ machine_mode mode = aarch64_reg_save_mode (R30_REGNUM);
+ rtx reg = gen_rtx_REG (mode, R30_REGNUM);
+@@ -12564,24 +12614,24 @@ aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+ poly_int64
+ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ {
++ aarch64_frame &frame = cfun->machine->frame;
++
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset;
++ return frame.bytes_above_hard_fp;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset
+- - cfun->machine->frame.locals_offset;
++ return frame.bytes_above_hard_fp - frame.bytes_above_locals;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.frame_size
+- - cfun->machine->frame.locals_offset;
++ return frame.frame_size - frame.bytes_above_locals;
+ }
+
+- return cfun->machine->frame.frame_size;
++ return frame.frame_size;
+ }
+
+
+@@ -24686,10 +24736,11 @@ aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst,
+ *dst = aarch64_progress_pointer (*dst);
+ }
+
+-/* Expand a cpymem using the MOPS extension. OPERANDS are taken
+- from the cpymem pattern. Return true iff we succeeded. */
+-static bool
+-aarch64_expand_cpymem_mops (rtx *operands)
++/* Expand a cpymem/movmem using the MOPS extension. OPERANDS are taken
++ from the cpymem/movmem pattern. IS_MEMMOVE is true if this is a memmove
++ rather than memcpy. Return true iff we succeeded. */
++bool
++aarch64_expand_cpymem_mops (rtx *operands, bool is_memmove = false)
+ {
+ if (!TARGET_MOPS)
+ return false;
+@@ -24701,8 +24752,10 @@ aarch64_expand_cpymem_mops (rtx *operands)
+ rtx dst_mem = replace_equiv_address (operands[0], dst_addr);
+ rtx src_mem = replace_equiv_address (operands[1], src_addr);
+ rtx sz_reg = copy_to_mode_reg (DImode, operands[2]);
+- emit_insn (gen_aarch64_cpymemdi (dst_mem, src_mem, sz_reg));
+-
++ if (is_memmove)
++ emit_insn (gen_aarch64_movmemdi (dst_mem, src_mem, sz_reg));
++ else
++ emit_insn (gen_aarch64_cpymemdi (dst_mem, src_mem, sz_reg));
+ return true;
+ }
+
+@@ -25981,11 +26034,9 @@ aarch64_operands_ok_for_ldpstp (rtx *operands, bool load,
+ gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1)),
+ GET_MODE_SIZE (GET_MODE (mem_2))));
+
+- /* One of the memory accesses must be a mempair operand.
+- If it is not the first one, they need to be swapped by the
+- peephole. */
+- if (!aarch64_mem_pair_operand (mem_1, GET_MODE (mem_1))
+- && !aarch64_mem_pair_operand (mem_2, GET_MODE (mem_2)))
++ /* The lower memory access must be a mem-pair operand. */
++ rtx lower_mem = reversed ? mem_2 : mem_1;
++ if (!aarch64_mem_pair_operand (lower_mem, GET_MODE (lower_mem)))
+ return false;
+
+ if (REG_P (reg_1) && FP_REGNUM_P (REGNO (reg_1)))
+--- a/src/gcc/config/aarch64/aarch64.h
++++ b/src/gcc/config/aarch64/aarch64.h
+@@ -860,6 +860,9 @@ extern enum aarch64_processor aarch64_tune;
+ #ifdef HAVE_POLY_INT_H
+ struct GTY (()) aarch64_frame
+ {
++ /* The offset from the bottom of the static frame (the bottom of the
++ outgoing arguments) of each register save slot, or -2 if no save is
++ needed. */
+ poly_int64 reg_offset[LAST_SAVED_REGNUM + 1];
+
+ /* The number of extra stack bytes taken up by register varargs.
+@@ -868,25 +871,28 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ HOST_WIDE_INT saved_varargs_size;
+
+- /* The size of the callee-save registers with a slot in REG_OFFSET. */
+- poly_int64 saved_regs_size;
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the bottom of the register save area.
++ This value is always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_saved_regs;
+
+- /* The size of the callee-save registers with a slot in REG_OFFSET that
+- are saved below the hard frame pointer. */
+- poly_int64 below_hard_fp_saved_regs_size;
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the hard frame pointer. This value is
++ always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_hard_fp;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- top of the locals area. This value is always a multiple of
++ /* The number of bytes between the top of the locals area and the top
++ of the frame (the incoming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 locals_offset;
++ poly_int64 bytes_above_locals;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- hard_frame_pointer. This value is always a multiple of
++ /* The number of bytes between the hard_frame_pointer and the top of
++ the frame (the incoming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 hard_fp_offset;
++ poly_int64 bytes_above_hard_fp;
+
+- /* The size of the frame. This value is the offset from base of the
+- frame (incomming SP) to the stack_pointer. This value is always
++ /* The size of the frame, i.e. the number of bytes between the bottom
++ of the outgoing arguments and the incoming SP. This value is always
+ a multiple of STACK_BOUNDARY. */
+ poly_int64 frame_size;
+
+@@ -897,10 +903,6 @@ struct GTY (()) aarch64_frame
+ It is zero when no push is used. */
+ HOST_WIDE_INT callee_adjust;
+
+- /* The offset from SP to the callee-save registers after initial_adjust.
+- It may be non-zero if no push is used (ie. callee_adjust == 0). */
+- poly_int64 callee_offset;
+-
+ /* The size of the stack adjustment before saving or after restoring
+ SVE registers. */
+ poly_int64 sve_callee_adjust;
+@@ -948,6 +950,14 @@ struct GTY (()) aarch64_frame
+ This is the register they should use. */
+ unsigned spare_pred_reg;
+
++ /* An SVE register that is saved below the hard frame pointer and that acts
++ as a probe for later allocations, or INVALID_REGNUM if none. */
++ unsigned sve_save_and_probe;
++
++ /* A register that is saved at the hard frame pointer and that acts
++ as a probe for later allocations, or INVALID_REGNUM if none. */
++ unsigned hard_fp_save_and_probe;
++
+ bool laid_out;
+
+ /* True if shadow call stack should be enabled for the current function. */
+--- a/src/gcc/config/aarch64/aarch64.md
++++ b/src/gcc/config/aarch64/aarch64.md
+@@ -1159,9 +1159,27 @@ (define_expand "untyped_call"
+ {
+ int i;
+
++ /* Generate a PARALLEL that contains all of the register results.
++ The offsets are somewhat arbitrary, since we don't know the
++ actual return type. The main thing we need to avoid is having
++ overlapping byte ranges, since those might give the impression
++ that two registers are known to have data in common. */
++ rtvec rets = rtvec_alloc (XVECLEN (operands[2], 0));
++ poly_int64 offset = 0;
++ for (i = 0; i < XVECLEN (operands[2], 0); i++)
++ {
++ rtx reg = SET_SRC (XVECEXP (operands[2], 0, i));
++ gcc_assert (REG_P (reg));
++ rtx offset_rtx = gen_int_mode (offset, Pmode);
++ rtx piece = gen_rtx_EXPR_LIST (VOIDmode, reg, offset_rtx);
++ RTVEC_ELT (rets, i) = piece;
++ offset += GET_MODE_SIZE (GET_MODE (reg));
++ }
++ rtx ret = gen_rtx_PARALLEL (VOIDmode, rets);
++
+ /* Untyped calls always use the default ABI. It's only possible to use
+ ABI variants if we know the type of the target function. */
+- emit_call_insn (gen_call (operands[0], const0_rtx, const0_rtx));
++ emit_call_insn (gen_call_value (ret, operands[0], const0_rtx, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+@@ -1627,7 +1645,22 @@ (define_expand "cpymemdi"
+ }
+ )
+
+-(define_insn "aarch64_movmemdi"
++(define_expand "aarch64_movmemdi"
++ [(parallel
++ [(set (match_operand 2) (const_int 0))
++ (clobber (match_dup 3))
++ (clobber (match_dup 4))
++ (clobber (reg:CC CC_REGNUM))
++ (set (match_operand 0)
++ (unspec:BLK [(match_operand 1) (match_dup 2)] UNSPEC_MOVMEM))])]
++ "TARGET_MOPS"
++ {
++ operands[3] = XEXP (operands[0], 0);
++ operands[4] = XEXP (operands[1], 0);
++ }
++)
++
++(define_insn "*aarch64_movmemdi"
+ [(parallel [
+ (set (match_operand:DI 2 "register_operand" "+&r") (const_int 0))
+ (clobber (match_operand:DI 0 "register_operand" "+&r"))
+@@ -1660,17 +1693,9 @@ (define_expand "movmemdi"
+ && INTVAL (sz_reg) < aarch64_mops_memmove_size_threshold)
+ FAIL;
+
+- rtx addr_dst = XEXP (operands[0], 0);
+- rtx addr_src = XEXP (operands[1], 0);
+-
+- if (!REG_P (sz_reg))
+- sz_reg = force_reg (DImode, sz_reg);
+- if (!REG_P (addr_dst))
+- addr_dst = force_reg (DImode, addr_dst);
+- if (!REG_P (addr_src))
+- addr_src = force_reg (DImode, addr_src);
+- emit_insn (gen_aarch64_movmemdi (addr_dst, addr_src, sz_reg));
+- DONE;
++ if (aarch64_expand_cpymem_mops (operands, true))
++ DONE;
++ FAIL;
+ }
+ )
+
+@@ -7668,9 +7693,9 @@ (define_insn "stg"
+ ;; Load/Store 64-bit (LS64) instructions.
+ (define_insn "ld64b"
+ [(set (match_operand:V8DI 0 "register_operand" "=r")
+- (unspec_volatile:V8DI
+- [(mem:V8DI (match_operand:DI 1 "register_operand" "r"))]
+- UNSPEC_LD64B)
++ (unspec_volatile:V8DI
++ [(mem:V8DI (match_operand:DI 1 "register_operand" "r"))]
++ UNSPEC_LD64B)
+ )]
+ "TARGET_LS64"
+ "ld64b\\t%0, [%1]"
+@@ -7678,9 +7703,9 @@ (define_insn "ld64b"
+ )
+
+ (define_insn "st64b"
+- [(set (mem:V8DI (match_operand:DI 0 "register_operand" "=r"))
+- (unspec_volatile:V8DI [(match_operand:V8DI 1 "register_operand" "r")]
+- UNSPEC_ST64B)
++ [(set (mem:V8DI (match_operand:DI 0 "register_operand" "r"))
++ (unspec_volatile:V8DI [(match_operand:V8DI 1 "register_operand" "r")]
++ UNSPEC_ST64B)
+ )]
+ "TARGET_LS64"
+ "st64b\\t%1, [%0]"
+@@ -7689,10 +7714,10 @@ (define_insn "st64b"
+
+ (define_insn "st64bv"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+- (unspec_volatile:DI [(const_int 0)] UNSPEC_ST64BV_RET))
++ (unspec_volatile:DI [(const_int 0)] UNSPEC_ST64BV_RET))
+ (set (mem:V8DI (match_operand:DI 1 "register_operand" "r"))
+- (unspec_volatile:V8DI [(match_operand:V8DI 2 "register_operand" "r")]
+- UNSPEC_ST64BV)
++ (unspec_volatile:V8DI [(match_operand:V8DI 2 "register_operand" "r")]
++ UNSPEC_ST64BV)
+ )]
+ "TARGET_LS64"
+ "st64bv\\t%0, %2, [%1]"
+@@ -7701,10 +7726,10 @@ (define_insn "st64bv"
+
+ (define_insn "st64bv0"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+- (unspec_volatile:DI [(const_int 0)] UNSPEC_ST64BV0_RET))
++ (unspec_volatile:DI [(const_int 0)] UNSPEC_ST64BV0_RET))
+ (set (mem:V8DI (match_operand:DI 1 "register_operand" "r"))
+- (unspec_volatile:V8DI [(match_operand:V8DI 2 "register_operand" "r")]
+- UNSPEC_ST64BV0)
++ (unspec_volatile:V8DI [(match_operand:V8DI 2 "register_operand" "r")]
++ UNSPEC_ST64BV0)
+ )]
+ "TARGET_LS64"
+ "st64bv0\\t%0, %2, [%1]"
+--- a/src/gcc/config/aarch64/arm_acle.h
++++ b/src/gcc/config/aarch64/arm_acle.h
+@@ -270,40 +270,7 @@ __ttest (void)
+ #endif
+
+ #ifdef __ARM_FEATURE_LS64
+-#pragma GCC push_options
+-#pragma GCC target ("+nothing+ls64")
+-
+ typedef __arm_data512_t data512_t;
+-
+-__extension__ extern __inline data512_t
+-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_ld64b (const void *__addr)
+-{
+- return __builtin_aarch64_ld64b (__addr);
+-}
+-
+-__extension__ extern __inline void
+-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_st64b (void *__addr, data512_t __value)
+-{
+- __builtin_aarch64_st64b (__addr, __value);
+-}
+-
+-__extension__ extern __inline uint64_t
+-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_st64bv (void *__addr, data512_t __value)
+-{
+- return __builtin_aarch64_st64bv (__addr, __value);
+-}
+-
+-__extension__ extern __inline uint64_t
+-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_st64bv0 (void *__addr, data512_t __value)
+-{
+- return __builtin_aarch64_st64bv0 (__addr, __value);
+-}
+-
+-#pragma GCC pop_options
+ #endif
+
+ #pragma GCC push_options
+--- a/src/gcc/config/alpha/alpha.cc
++++ b/src/gcc/config/alpha/alpha.cc
+@@ -2070,6 +2070,8 @@ static rtx
+ alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
+ {
+ HOST_WIDE_INT d1, d2, d3, d4;
++ machine_mode mode = GET_MODE (target);
++ rtx orig_target = target;
+
+ /* Decompose the entire word */
+
+@@ -2082,6 +2084,9 @@ alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
+ d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ gcc_assert (c1 == d4);
+
++ if (mode != DImode)
++ target = gen_lowpart (DImode, target);
++
+ /* Construct the high word */
+ if (d4)
+ {
+@@ -2101,7 +2106,7 @@ alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
+ if (d1)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
+
+- return target;
++ return orig_target;
+ }
+
+ /* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits. */
+--- a/src/gcc/config/arm/arm-builtins.cc
++++ b/src/gcc/config/arm/arm-builtins.cc
+@@ -97,7 +97,7 @@ arm_binop_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ /* T (T, unsigned immediate). */
+ static enum arm_type_qualifiers
+ arm_sat_binop_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+- = { qualifier_unsigned, qualifier_none, qualifier_unsigned_immediate };
++ = { qualifier_none, qualifier_none, qualifier_unsigned_immediate };
+ #define SAT_BINOP_UNSIGNED_IMM_QUALIFIERS \
+ (arm_sat_binop_imm_qualifiers)
+
+--- a/src/gcc/config/arm/arm.md
++++ b/src/gcc/config/arm/arm.md
+@@ -7466,7 +7466,7 @@ (define_expand "movdf"
+ && !arm_const_double_rtx (operands[1])
+ && !(TARGET_VFP_DOUBLE && vfp3_const_double_rtx (operands[1])))
+ {
+- rtx clobreg = gen_reg_rtx (DFmode);
++ rtx clobreg = gen_reg_rtx (DImode);
+ emit_insn (gen_no_literal_pool_df_immediate (operands[0], operands[1],
+ clobreg));
+ DONE;
+--- a/src/gcc/config/arm/arm_mve.h
++++ b/src/gcc/config/arm/arm_mve.h
+@@ -9675,42 +9675,42 @@ __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pr
+
+ __extension__ extern __inline int8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline int16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+ {
+ return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p);
+ }
+@@ -16055,7 +16055,7 @@ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16065,7 +16065,7 @@ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16075,7 +16075,7 @@ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16085,7 +16085,7 @@ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16131,7 +16131,7 @@ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16141,7 +16141,7 @@ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16151,7 +16151,7 @@ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+@@ -16161,7 +16161,7 @@ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+ {
+- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
++ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
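A side illustration (not part of the patch): the new & 0x1u masking matters because the carry argument is a plain unsigned value that callers need not normalise to 0 or 1. Without the mask, stray bits would be shifted into the other FPSCR.NZCVQC flag positions. A minimal sketch of the bit arithmetic, using an ordinary variable in place of the real FPSCR register:

      // Editorial sketch: plain bit arithmetic, no real FPSCR access.
      #include <cassert>

      int main ()
      {
        unsigned fpscr = 0;   // stand-in for FPSCR.NZCVQC
        unsigned carry = 3;   // a caller-supplied carry that is not 0 or 1

        unsigned unmasked = (fpscr & ~0x20000000u) | (carry << 29);
        unsigned masked   = (fpscr & ~0x20000000u) | ((carry & 0x1u) << 29);

        // Unmasked, bit 30 (the Z flag position) is clobbered as well.
        assert ((unmasked & 0x40000000u) != 0);
        // Masked, only bit 29 (the carry position) is written.
        assert (masked == 0x20000000u);
        return 0;
      }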
+@@ -16171,14 +16171,14 @@ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
+ {
+- return vstrbq_p_u8 (__addr, __value, __p);
++ return __arm_vstrbq_p_u8 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
+ {
+- return vstrbq_p_s8 (__addr, __value, __p);
++ return __arm_vstrbq_p_s8 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+@@ -16203,14 +16203,14 @@ __extension__ extern __inline uint8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p)
+ {
+- return vldrbq_z_u8 ( __base, __p);
++ return __arm_vldrbq_z_u8 ( __base, __p);
+ }
+
+ __extension__ extern __inline int8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p)
+ {
+- return vldrbq_z_s8 ( __base, __p);
++ return __arm_vldrbq_z_s8 ( __base, __p);
+ }
+
+ __extension__ extern __inline int8x16x2_t
+@@ -16253,14 +16253,14 @@ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+ {
+- return vstrhq_p_u16 (__addr, __value, __p);
++ return __arm_vstrhq_p_u16 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+ {
+- return vstrhq_p_s16 (__addr, __value, __p);
++ return __arm_vstrhq_p_s16 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+@@ -16285,14 +16285,14 @@ __extension__ extern __inline uint16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p)
+ {
+- return vldrhq_z_u16 ( __base, __p);
++ return __arm_vldrhq_z_u16 ( __base, __p);
+ }
+
+ __extension__ extern __inline int16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p)
+ {
+- return vldrhq_z_s16 ( __base, __p);
++ return __arm_vldrhq_z_s16 ( __base, __p);
+ }
+
+ __extension__ extern __inline int16x8x2_t
+@@ -16335,14 +16335,14 @@ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+ {
+- return vstrwq_p_u32 (__addr, __value, __p);
++ return __arm_vstrwq_p_u32 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+ {
+- return vstrwq_p_s32 (__addr, __value, __p);
++ return __arm_vstrwq_p_s32 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline void
+@@ -16367,14 +16367,14 @@ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p)
+ {
+- return vldrwq_z_u32 ( __base, __p);
++ return __arm_vldrwq_z_u32 ( __base, __p);
+ }
+
+ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p)
+ {
+- return vldrwq_z_s32 ( __base, __p);
++ return __arm_vldrwq_z_s32 ( __base, __p);
+ }
+
+ __extension__ extern __inline int32x4x2_t
+@@ -19837,7 +19837,7 @@ __extension__ extern __inline float16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p)
+ {
+- return vldrhq_z_f16 (__base, __p);
++ return __arm_vldrhq_z_f16 (__base, __p);
+ }
+
+ __extension__ extern __inline void
+@@ -19853,7 +19853,7 @@ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+ {
+- return vstrhq_p_f16 (__addr, __value, __p);
++ return __arm_vstrhq_p_f16 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline float32x4x4_t
+@@ -19878,7 +19878,7 @@ __extension__ extern __inline float32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p)
+ {
+- return vldrwq_z_f32 (__base, __p);
++ return __arm_vldrwq_z_f32 (__base, __p);
+ }
+
+ __extension__ extern __inline void
+@@ -19894,7 +19894,7 @@ __extension__ extern __inline void
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+ __arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+ {
+- return vstrwq_p_f32 (__addr, __value, __p);
++ return __arm_vstrwq_p_f32 (__addr, __value, __p);
+ }
+
+ __extension__ extern __inline float16x8_t
+@@ -26417,42 +26417,42 @@ __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16
+
+ __extension__ extern __inline int8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline int32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline int16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint8x16_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint32x4_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p);
+ }
+
+ __extension__ extern __inline uint16x8_t
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+-__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
++__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+ {
+ return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p);
+ }
+@@ -35582,13 +35582,29 @@ enum {
+ short: __ARM_mve_type_int_n, \
+ int: __ARM_mve_type_int_n, \
+ long: __ARM_mve_type_int_n, \
+- double: __ARM_mve_type_fp_n, \
+ long long: __ARM_mve_type_int_n, \
++ _Float16: __ARM_mve_type_fp_n, \
++ __fp16: __ARM_mve_type_fp_n, \
++ float: __ARM_mve_type_fp_n, \
++ double: __ARM_mve_type_fp_n, \
+ unsigned char: __ARM_mve_type_int_n, \
+ unsigned short: __ARM_mve_type_int_n, \
+ unsigned int: __ARM_mve_type_int_n, \
+ unsigned long: __ARM_mve_type_int_n, \
+ unsigned long long: __ARM_mve_type_int_n, \
++ signed char*: __ARM_mve_type_int8_t_ptr, \
++ short*: __ARM_mve_type_int16_t_ptr, \
++ int*: __ARM_mve_type_int32_t_ptr, \
++ long*: __ARM_mve_type_int32_t_ptr, \
++ long long*: __ARM_mve_type_int64_t_ptr, \
++ _Float16*: __ARM_mve_type_float16_t_ptr, \
++ __fp16*: __ARM_mve_type_float16_t_ptr, \
++ float*: __ARM_mve_type_float32_t_ptr, \
++ unsigned char*: __ARM_mve_type_uint8_t_ptr, \
++ unsigned short*: __ARM_mve_type_uint16_t_ptr, \
++ unsigned int*: __ARM_mve_type_uint32_t_ptr, \
++ unsigned long*: __ARM_mve_type_uint32_t_ptr, \
++ unsigned long long*: __ARM_mve_type_uint64_t_ptr, \
+ default: __ARM_mve_unsupported_type))
+ #else
+ #define __ARM_mve_typeid(x) _Generic(x, \
+@@ -35647,30 +35663,67 @@ enum {
+ unsigned int: __ARM_mve_type_int_n, \
+ unsigned long: __ARM_mve_type_int_n, \
+ unsigned long long: __ARM_mve_type_int_n, \
++ signed char*: __ARM_mve_type_int8_t_ptr, \
++ short*: __ARM_mve_type_int16_t_ptr, \
++ int*: __ARM_mve_type_int32_t_ptr, \
++ long*: __ARM_mve_type_int32_t_ptr, \
++ long long*: __ARM_mve_type_int64_t_ptr, \
++ unsigned char*: __ARM_mve_type_uint8_t_ptr, \
++ unsigned short*: __ARM_mve_type_uint16_t_ptr, \
++ unsigned int*: __ARM_mve_type_uint32_t_ptr, \
++ unsigned long*: __ARM_mve_type_uint32_t_ptr, \
++ unsigned long long*: __ARM_mve_type_uint64_t_ptr, \
+ default: __ARM_mve_unsupported_type))
+ #endif /* MVE Floating point. */
+
+ extern void *__ARM_undef;
+ #define __ARM_mve_coerce(param, type) \
+ _Generic(param, type: param, default: *(type *)__ARM_undef)
+-#define __ARM_mve_coerce1(param, type) \
+- _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
+-#define __ARM_mve_coerce2(param, type) \
+- _Generic(param, type: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef)
++#define __ARM_mve_coerce_i_scalar(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, int8_t: param, int16_t: param, int32_t: param, int64_t: param, uint8_t: param, uint16_t: param, uint32_t: param, uint64_t: param, default: *(type *)__ARM_undef))
++
++#define __ARM_mve_coerce_s8_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, signed char*: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_u8_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned char*: param, default: *(type *)__ARM_undef))
++
++#define __ARM_mve_coerce_s16_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, short*: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_u16_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned short*: param, default: *(type *)__ARM_undef))
++
++#define __ARM_mve_coerce_s32_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, int*: param, long*: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_u32_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned int*: param, unsigned long*: param, default: *(type *)__ARM_undef))
++
++#define __ARM_mve_coerce_s64_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, long long*: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_u64_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned long long*: param, default: *(type *)__ARM_undef))
++
++#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
++#define __ARM_mve_coerce_f_scalar(param, type) \
++ _Generic(param, type: param, const type: param, __fp16: param, default: _Generic (param, _Float16: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_f16_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, __fp16*: param, _Float16*: param, default: *(type *)__ARM_undef))
++#define __ARM_mve_coerce_f32_ptr(param, type) \
++ _Generic(param, type: param, const type: param, default: _Generic (param, float*: param, default: *(type *)__ARM_undef))
++#endif
+
+ #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+
+ #define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce_s16_ptr(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce_u16_ptr(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce_f16_ptr(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
+
+ #define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -35847,6 +35900,10 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+@@ -35871,16 +35928,16 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(p0, float16x8_t), __ARM_mve_coerce(p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(p0, float32x4_t), __ARM_mve_coerce(p1, float32x4_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+ #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -35897,10 +35954,10 @@ extern void *__ARM_undef;
+ #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -35925,14 +35982,14 @@ extern void *__ARM_undef;
+ #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -35957,14 +36014,14 @@ extern void *__ARM_undef;
+ #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -35995,16 +36052,16 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+ #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36012,13 +36069,13 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+ #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36028,11 +36085,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+ #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36040,25 +36097,25 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+ #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36113,8 +36170,8 @@ extern void *__ARM_undef;
+ #define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36125,14 +36182,14 @@ extern void *__ARM_undef;
+ #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36143,8 +36200,8 @@ extern void *__ARM_undef;
+ #define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -36166,14 +36223,14 @@ extern void *__ARM_undef;
+ #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36186,8 +36243,8 @@ extern void *__ARM_undef;
+ #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -36242,12 +36299,12 @@ extern void *__ARM_undef;
+ #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36278,12 +36335,12 @@ extern void *__ARM_undef;
+ #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36334,12 +36391,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36347,9 +36404,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36382,8 +36439,8 @@ extern void *__ARM_undef;
+ #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+@@ -36396,17 +36453,17 @@ extern void *__ARM_undef;
+ #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+ #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+@@ -36414,12 +36471,12 @@ extern void *__ARM_undef;
+ #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36452,12 +36509,12 @@ extern void *__ARM_undef;
+ #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36482,12 +36539,12 @@ extern void *__ARM_undef;
+ #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -36630,12 +36687,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36714,44 +36771,44 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36941,11 +36998,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+@@ -36957,11 +37014,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+ #define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36971,11 +37028,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+ #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -36988,14 +37045,14 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+ #define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37049,8 +37106,8 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+@@ -37065,8 +37122,8 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double)));})
+
+ #define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37089,14 +37146,14 @@ extern void *__ARM_undef;
+ #define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37107,14 +37164,14 @@ extern void *__ARM_undef;
+ #define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37176,13 +37233,13 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+ #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37283,11 +37340,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+@@ -37316,14 +37373,14 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37464,15 +37521,15 @@ extern void *__ARM_undef;
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37507,14 +37564,14 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37541,14 +37598,14 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -37565,236 +37622,236 @@ extern void *__ARM_undef;
+
+ #define __arm_vld1q(p0) (\
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
+- int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
++ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
+ #define __arm_vld1q_z(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
+- int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce1(p0, float16_t *), p1), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), p1), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), p1), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), p1), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
++ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), p1), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+ #define __arm_vld2q(p0) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
+- int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
++ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
+ #define __arm_vld4q(p0) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
+- int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
++ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
+ #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+ #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+ #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+ #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+ #define __arm_vldrwq_gather_offset(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+ #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1, p2)))
+
+ #define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+ #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1, p2)))
+
+ #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
+
+ #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
+
+ #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+ #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+ #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+ #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+ #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+ #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+ #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+ #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+ #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+ #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+ #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+@@ -37811,44 +37868,44 @@ extern void *__ARM_undef;
+ #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+ #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+ #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+ #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+ #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+ #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+- int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
++ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+ #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -38021,19 +38078,19 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+@@ -38156,19 +38213,19 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+@@ -38254,10 +38311,22 @@ extern void *__ARM_undef;
+ #define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
+- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
++ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
++ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+ #define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+@@ -38281,16 +38350,16 @@ extern void *__ARM_undef;
+ #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+- int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
++ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+ #else /* MVE Integer. */
+
+@@ -38306,12 +38375,12 @@ extern void *__ARM_undef;
+
+ #define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
+
+ #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -38408,12 +38477,12 @@ extern void *__ARM_undef;
+ #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38440,12 +38509,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -38459,12 +38528,12 @@ extern void *__ARM_undef;
+ #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38495,12 +38564,12 @@ extern void *__ARM_undef;
+ #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38569,12 +38638,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38582,16 +38651,16 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+@@ -38599,12 +38668,12 @@ extern void *__ARM_undef;
+ #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38620,7 +38689,11 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38635,12 +38708,12 @@ extern void *__ARM_undef;
+ #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38715,12 +38788,12 @@ extern void *__ARM_undef;
+ #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38745,12 +38818,12 @@ extern void *__ARM_undef;
+ #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38800,10 +38873,10 @@ extern void *__ARM_undef;
+ #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+@@ -38820,12 +38893,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38856,12 +38929,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38942,16 +39015,16 @@ extern void *__ARM_undef;
+ #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+ #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+@@ -38961,9 +39034,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38971,9 +39044,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38981,9 +39054,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -38991,20 +39064,20 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+@@ -39029,12 +39102,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+ #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -39144,25 +39217,25 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -39225,9 +39298,56 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
++
++
++#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
++ __typeof(p1) __p1 = (p1); \
++ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
++
++#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
++ __typeof(p1) __p1 = (p1); \
++ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
++
++#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
++ __typeof(p1) __p1 = (p1); \
++ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
++
++#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
++ __typeof(p1) __p1 = (p1); \
++ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+ #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -39250,23 +39370,23 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -39291,9 +39411,9 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+ #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -39456,12 +39576,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -39561,12 +39681,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -39594,12 +39714,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -39611,12 +39731,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -39631,12 +39751,12 @@ extern void *__ARM_undef;
+
+ #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+@@ -39645,144 +39765,144 @@ extern void *__ARM_undef;
+
+ #define __arm_vld1q(p0) (\
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
+
+ #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
+
+ #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+
+ #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+@@ -39797,58 +39917,58 @@ extern void *__ARM_undef;
+ #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -39953,15 +40073,15 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+@@ -40055,15 +40175,15 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+@@ -40147,29 +40267,45 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+ #define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1)))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), p1), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), p1), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), p1), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), p1), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1)))
+
+ #define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
+
+
+ #define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
+- int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
+- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
+- int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
++ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
++ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
++ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
++ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
++
++#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
++ __typeof(p2) __p2 = (p2); \
++ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -40185,14 +40321,14 @@ extern void *__ARM_undef;
+ #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
+
+ #endif /* MVE Integer. */
+
+@@ -40303,62 +40439,62 @@ extern void *__ARM_undef;
+ #define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+@@ -40372,12 +40508,12 @@ extern void *__ARM_undef;
+ #define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -40402,12 +40538,12 @@ extern void *__ARM_undef;
+ #define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -40447,20 +40583,20 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+ #define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
+- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
++ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1), \
++ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1)))
+
+ #define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
++ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
+
+ #define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
+- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
++ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1), \
++ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1)))
+
+ #define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
+- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
++ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
++ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
+
+ #define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -40516,36 +40652,36 @@ extern void *__ARM_undef;
+
+ #define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -40646,12 +40782,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -40666,9 +40802,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -40794,17 +40930,17 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+ #define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -40874,10 +41010,10 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int) , p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int) , p2));})
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int) , p2), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int) , p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int) , p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int) , p2));})
+
+ #define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -40943,12 +41079,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -40982,12 +41118,12 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41015,23 +41151,23 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41077,12 +41213,12 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+@@ -41094,17 +41230,17 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+@@ -41115,15 +41251,15 @@ extern void *__ARM_undef;
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+ #define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+- int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
++ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
++ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+@@ -41189,9 +41325,9 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+ #define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41202,12 +41338,12 @@ extern void *__ARM_undef;
+
+ #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41215,9 +41351,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41225,89 +41361,89 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+ #define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+ #define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, (const int) p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, (const int) p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce_i_scalar(__p0, int), p1, (const int) p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, (const int) p2));})
+
+ #define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+ #define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+- int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
+- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
++ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
++ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+ #define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -41343,14 +41479,14 @@ extern void *__ARM_undef;
+ #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -41365,22 +41501,22 @@ extern void *__ARM_undef;
+ #define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+@@ -41406,9 +41542,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41416,9 +41552,9 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+ #define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41426,16 +41562,16 @@ extern void *__ARM_undef;
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+ #define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
+- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
+- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
++ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
++ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+@@ -41532,34 +41668,34 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41602,17 +41738,17 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+ #define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+@@ -41776,22 +41912,22 @@ extern void *__ARM_undef;
+
+ #define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+ #define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+ #define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+@@ -41807,61 +41943,61 @@ extern void *__ARM_undef;
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+ #define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+- int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+- int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
++ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
++ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+ #define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+- int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
++ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
++ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+ #define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+- int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
++ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
++ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+ #define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+- int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
++ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
++ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+ #define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+- int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+- int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
++ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
++ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+ #endif /* __cplusplus */
+ #endif /* __ARM_FEATURE_MVE */
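The arm_mve.h hunks above replace the catch-all __ARM_mve_coerce calls with type-specific helpers (__ARM_mve_coerce_s8_ptr, __ARM_mve_coerce_u8_ptr, __ARM_mve_coerce_u32_ptr, __ARM_mve_coerce_s64_ptr, __ARM_mve_coerce_u64_ptr and __ARM_mve_coerce_i_scalar), so that pointer and scalar arguments of the polymorphic macros are matched against the exact type each _Generic branch expects. The sketch below is a simplified, hypothetical analogue of that pattern (the my_* names are illustrative and the real coercion definitions are not part of this hunk): every association of a _Generic selection has to type-check even though only the selected one is evaluated, so each argument is routed through a coercion macro that always yields a value of the branch's expected type.

    /* Editorial illustration only -- hypothetical my_* names, not the real
       arm_mve.h definitions. */
    #include <stdint.h>
    #include <stdio.h>

    extern void *my_undef;   /* never defined; referenced only in branches that
                                are never evaluated, mirroring __ARM_undef */

    #define my_coerce_u32_ptr(param) \
      _Generic((param), uint32_t *: (param), default: (uint32_t *) my_undef)
    #define my_coerce_i_scalar(param) \
      _Generic((param), uint32_t *: 0, default: (param))

    static uint32_t my_vidupq_n_u32(uint32_t start, int step)  { return start + step; }
    static uint32_t my_vidupq_wb_u32(uint32_t *addr, int step) { return *addr += step; }

    /* Overload on the type of p0, in the spirit of __arm_vidupq_u32 above. */
    #define my_vidupq_u32(p0, step)                                          \
      _Generic((p0),                                                         \
        uint32_t *: my_vidupq_wb_u32(my_coerce_u32_ptr(p0), (step)),         \
        default:    my_vidupq_n_u32((uint32_t) my_coerce_i_scalar(p0), (step)))

    int main(void)
    {
      uint32_t base = 4;
      printf("%u\n", (unsigned) my_vidupq_u32(&base, 4));  /* writeback form: prints 8 */
      printf("%u\n", (unsigned) my_vidupq_u32(16, 4));     /* scalar form: prints 20 */
      return 0;
    }

Built with a C11 compiler, the first call selects the writeback overload and the second the scalar one, which is the same dispatch the __arm_vidupq_u32 macro above performs for uint32_t * and integer arguments.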
+--- a/src/gcc/config/arm/constraints.md
++++ b/src/gcc/config/arm/constraints.md
+@@ -102,10 +102,6 @@ (define_constraint "Rg"
+ (match_test "TARGET_HAVE_MVE && ((ival == 1) || (ival == 2)
+ || (ival == 4) || (ival == 8))")))
+
+-;; True if the immediate is multiple of 8 and in range of -/+ 1016 for MVE.
+-(define_predicate "mve_vldrd_immediate"
+- (match_test "satisfies_constraint_Ri (op)"))
+-
+ (define_register_constraint "t" "TARGET_32BIT ? VFP_LO_REGS : NO_REGS"
+ "The VFP registers @code{s0}-@code{s31}.")
+
+@@ -566,6 +562,22 @@ (define_constraint "US"
+ (match_code "symbol_ref")
+ )
+
++;; True if the immediate is in the range +/- 1016 and a multiple of 8 for MVE.
++(define_constraint "Ri"
++ "@internal In Thumb-2 state a constant is multiple of 8 and in range
++ of -/+ 1016 for MVE"
++ (and (match_code "const_int")
++ (match_test "TARGET_HAVE_MVE && (-1016 <= ival) && (ival <= 1016)
++ && ((ival % 8) == 0)")))
++
++;; True if the immediate is a multiple of 2 and in the range of -/+ 252 for MVE.
++(define_constraint "Rl"
++ "@internal In Thumb-2 state a constant is multiple of 2 and in range
++ of -/+ 252 for MVE"
++ (and (match_code "const_int")
++ (match_test "TARGET_HAVE_MVE && (-252 <= ival) && (ival <= 252)
++ && ((ival % 2) == 0)")))
++
+ (define_memory_constraint "Uz"
+ "@internal
+ A memory access that is accessible as an LDC/STC operand"
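The constraints.md changes above remove the mve_vldrd_immediate predicate definition from this file and add the Ri and Rl constraints. Ignoring the TARGET_HAVE_MVE guard, their numeric tests amount to the checks sketched below; this is an illustrative C restatement with hypothetical my_* names, not code from the patch.

    #include <stdbool.h>
    #include <stdio.h>

    /* "Ri": a multiple of 8 within -/+ 1016. */
    static bool my_ok_for_Ri(long ival)
    { return ival >= -1016 && ival <= 1016 && (ival % 8) == 0; }

    /* "Rl": a multiple of 2 within -/+ 252. */
    static bool my_ok_for_Rl(long ival)
    { return ival >= -252 && ival <= 252 && (ival % 2) == 0; }

    int main(void)
    {
      /* Prints "1 0 1": 1016 satisfies Ri, 1020 does not, -252 satisfies Rl. */
      printf("%d %d %d\n", my_ok_for_Ri(1016), my_ok_for_Ri(1020), my_ok_for_Rl(-252));
      return 0;
    }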
+--- a/src/gcc/config/arm/mve.md
++++ b/src/gcc/config/arm/mve.md
+@@ -134,7 +134,7 @@ (define_insn "mve_vrndq_m_f<mode>"
+ VRNDQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vrintzt.f%#<V_sz_elem> %q0, %q2"
++ "vpst\;vrintzt.f%#<V_sz_elem>\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -232,7 +232,7 @@ (define_insn "mve_vrev64q_f<mode>"
+ VREV64Q_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vrev64.%#<V_sz_elem> %q0, %q1"
++ "vrev64.%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -245,7 +245,7 @@ (define_insn "mve_vnegq_f<mode>"
+ (neg:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vneg.f%#<V_sz_elem> %q0, %q1"
++ "vneg.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -259,7 +259,7 @@ (define_insn "mve_vdupq_n_f<mode>"
+ VDUPQ_N_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vdup.%#<V_sz_elem> %q0, %1"
++ "vdup.%#<V_sz_elem>\t%q0, %1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -272,7 +272,7 @@ (define_insn "mve_vabsq_f<mode>"
+ (abs:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vabs.f%#<V_sz_elem> %q0, %q1"
++ "vabs.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -286,7 +286,7 @@ (define_insn "mve_vrev32q_fv8hf"
+ VREV32Q_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vrev32.16 %q0, %q1"
++ "vrev32.16\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+ ;;
+@@ -299,7 +299,7 @@ (define_insn "mve_vcvttq_f32_f16v4sf"
+ VCVTTQ_F32_F16))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtt.f32.f16 %q0, %q1"
++ "vcvtt.f32.f16\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -313,7 +313,7 @@ (define_insn "mve_vcvtbq_f32_f16v4sf"
+ VCVTBQ_F32_F16))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtb.f32.f16 %q0, %q1"
++ "vcvtb.f32.f16\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -327,7 +327,7 @@ (define_insn "mve_vcvtq_to_f_<supf><mode>"
+ VCVTQ_TO_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvt.f%#<V_sz_elem>.<supf>%#<V_sz_elem> %q0, %q1"
++ "vcvt.f%#<V_sz_elem>.<supf>%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -341,7 +341,7 @@ (define_insn "mve_vrev64q_<supf><mode>"
+ VREV64Q))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrev64.%#<V_sz_elem> %q0, %q1"
++ "vrev64.%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -355,7 +355,7 @@ (define_insn "mve_vcvtq_from_f_<supf><mode>"
+ VCVTQ_FROM_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q1"
++ "vcvt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+ ;; [vqnegq_s])
+@@ -367,7 +367,7 @@ (define_insn "mve_vqnegq_s<mode>"
+ VQNEGQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vqneg.s%#<V_sz_elem> %q0, %q1"
++ "vqneg.s%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -381,7 +381,7 @@ (define_insn "mve_vqabsq_s<mode>"
+ VQABSQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vqabs.s%#<V_sz_elem> %q0, %q1"
++ "vqabs.s%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -394,7 +394,7 @@ (define_insn "mve_vnegq_s<mode>"
+ (neg:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE"
+- "vneg.s%#<V_sz_elem> %q0, %q1"
++ "vneg.s%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -428,7 +428,7 @@ (define_insn "mve_vdupq_n_<supf><mode>"
+ VDUPQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vdup.%#<V_sz_elem> %q0, %1"
++ "vdup.%#<V_sz_elem>\t%q0, %1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -441,7 +441,7 @@ (define_insn "@mve_vclzq_s<mode>"
+ (clz:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE"
+- "vclz.i%#<V_sz_elem> %q0, %q1"
++ "vclz.i%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+ (define_expand "mve_vclzq_u<mode>"
+@@ -462,7 +462,7 @@ (define_insn "mve_vclsq_s<mode>"
+ VCLSQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vcls.s%#<V_sz_elem> %q0, %q1"
++ "vcls.s%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -517,7 +517,7 @@ (define_insn "mve_vmovltq_<supf><mode>"
+ VMOVLTQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmovlt.<supf>%#<V_sz_elem> %q0, %q1"
++ "vmovlt.<supf>%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -531,7 +531,7 @@ (define_insn "mve_vmovlbq_<supf><mode>"
+ VMOVLBQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmovlb.<supf>%#<V_sz_elem> %q0, %q1"
++ "vmovlb.<supf>%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -545,7 +545,7 @@ (define_insn "mve_vcvtpq_<supf><mode>"
+ VCVTPQ))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtp.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q1"
++ "vcvtp.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -559,7 +559,7 @@ (define_insn "mve_vcvtnq_<supf><mode>"
+ VCVTNQ))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtn.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q1"
++ "vcvtn.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -573,7 +573,7 @@ (define_insn "mve_vcvtmq_<supf><mode>"
+ VCVTMQ))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtm.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q1"
++ "vcvtm.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -587,7 +587,7 @@ (define_insn "mve_vcvtaq_<supf><mode>"
+ VCVTAQ))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvta.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q1"
++ "vcvta.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -601,7 +601,7 @@ (define_insn "mve_vmvnq_n_<supf><mode>"
+ VMVNQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmvn.i%#<V_sz_elem> %q0, %1"
++ "vmvn.i%#<V_sz_elem>\t%q0, %1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -615,7 +615,7 @@ (define_insn "mve_vrev16q_<supf>v16qi"
+ VREV16Q))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrev16.8 %q0, %q1"
++ "vrev16.8\t%q0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -629,7 +629,7 @@ (define_insn "mve_vaddlvq_<supf>v4si"
+ VADDLVQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vaddlv.<supf>32 %Q0, %R0, %q1"
++ "vaddlv.<supf>32\t%Q0, %R0, %q1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -643,7 +643,7 @@ (define_insn "mve_vctp<mode1>qhi"
+ VCTPQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vctp.<mode1> %1"
++ "vctp.<mode1>\t%1"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -672,7 +672,7 @@ (define_insn "mve_vsubq_n_f<mode>"
+ VSUBQ_N_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vsub.f<V_sz_elem> %q0, %q1, %2"
++ "vsub.f<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -687,7 +687,7 @@ (define_insn "mve_vbrsrq_n_f<mode>"
+ VBRSRQ_N_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vbrsr.<V_sz_elem> %q0, %q1, %2"
++ "vbrsr.<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -716,7 +716,7 @@ (define_insn "mve_vcreateq_f<mode>"
+ VCREATEQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vmov %q0[2], %q0[0], %Q2, %Q1\;vmov %q0[3], %q0[1], %R2, %R1"
++ "vmov %q0[2], %q0[0], %Q1, %Q2\;vmov %q0[3], %q0[1], %R1, %R2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -731,7 +731,7 @@ (define_insn "mve_vcreateq_<supf><mode>"
+ VCREATEQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmov %q0[2], %q0[0], %Q2, %Q1\;vmov %q0[3], %q0[1], %R2, %R1"
++ "vmov %q0[2], %q0[0], %Q1, %Q2\;vmov %q0[3], %q0[1], %R1, %R2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -810,7 +810,7 @@ (define_insn "mve_vaddlvq_p_<supf>v4si"
+ VADDLVQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vaddlvt.<supf>32 %Q0, %R0, %q1"
++ "vpst\;vaddlvt.<supf>32\t%Q0, %R0, %q1"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -824,7 +824,7 @@ (define_insn "@mve_vcmp<mve_cmp_op>q_<mode>"
+ (match_operand:MVE_2 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE"
+- "vcmp.<mve_cmp_type>%#<V_sz_elem> <mve_cmp_op>, %q1, %q2"
++ "vcmp.<mve_cmp_type>%#<V_sz_elem>\t<mve_cmp_op>, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -873,7 +873,7 @@ (define_insn "mve_vaddq_n_<supf><mode>"
+ VADDQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vadd.i%#<V_sz_elem> %q0, %q1, %2"
++ "vadd.i%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -888,7 +888,7 @@ (define_insn "mve_vaddvaq_<supf><mode>"
+ VADDVAQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vaddva.<supf>%#<V_sz_elem> %0, %q2"
++ "vaddva.<supf>%#<V_sz_elem>\t%0, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -969,7 +969,7 @@ (define_insn "mve_vbrsrq_n_<supf><mode>"
+ VBRSRQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vbrsr.%#<V_sz_elem> %q0, %q1, %2"
++ "vbrsr.%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1828,7 +1828,7 @@ (define_insn "mve_vaddlvaq_<supf>v4si"
+ VADDLVAQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vaddlva.<supf>32 %Q0, %R0, %q2"
++ "vaddlva.<supf>32\t%Q0, %R0, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1843,7 +1843,7 @@ (define_insn "mve_vaddq_n_f<mode>"
+ VADDQ_N_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vadd.f%#<V_sz_elem> %q0, %q1, %2"
++ "vadd.f%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1857,7 +1857,7 @@ (define_insn "mve_vandq_f<mode>"
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vand %q0, %q1, %q2"
++ "vand\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1871,7 +1871,7 @@ (define_insn "mve_vbicq_f<mode>"
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vbic %q0, %q1, %q2"
++ "vbic\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1886,7 +1886,7 @@ (define_insn "mve_vbicq_n_<supf><mode>"
+ VBICQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vbic.i%#<V_sz_elem> %q0, %2"
++ "vbic.i%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1960,7 +1960,7 @@ (define_insn "mve_vctp<mode1>q_mhi"
+ VCTPQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vctpt.<mode1> %1"
++ "vpst\;vctpt.<mode1>\t%1"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -1975,7 +1975,7 @@ (define_insn "mve_vcvtbq_f16_f32v8hf"
+ VCVTBQ_F16_F32))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtb.f16.f32 %q0, %q2"
++ "vcvtb.f16.f32\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -1990,7 +1990,7 @@ (define_insn "mve_vcvttq_f16_f32v8hf"
+ VCVTTQ_F16_F32))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vcvtt.f16.f32 %q0, %q2"
++ "vcvtt.f16.f32\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2004,7 +2004,7 @@ (define_insn "mve_veorq_f<mode>"
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "veor %q0, %q1, %q2"
++ "veor\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2152,7 +2152,7 @@ (define_insn "mve_vmlaldavxq_s<mode>"
+ VMLALDAVXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlaldavx.s%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vmlaldavx.s%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2167,7 +2167,7 @@ (define_insn "mve_vmlsldavq_s<mode>"
+ VMLSLDAVQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlsldav.s%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vmlsldav.s%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2182,7 +2182,7 @@ (define_insn "mve_vmlsldavxq_s<mode>"
+ VMLSLDAVXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlsldavx.s%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vmlsldavx.s%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2255,7 +2255,7 @@ (define_insn "mve_vornq_f<mode>"
+ (match_operand:MVE_0 1 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vorn %q0, %q1, %q2"
++ "vorn\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2269,7 +2269,7 @@ (define_insn "mve_vorrq_f<mode>"
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vorr %q0, %q1, %q2"
++ "vorr\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2419,7 +2419,7 @@ (define_insn "mve_vrmlaldavhxq_sv4si"
+ VRMLALDAVHXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlaldavhx.s32 %Q0, %R0, %q1, %q2"
++ "vrmlaldavhx.s32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2538,7 +2538,7 @@ (define_insn "mve_vrmlaldavhq_<supf>v4si"
+ VRMLALDAVHQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlaldavh.<supf>32 %Q0, %R0, %q1, %q2"
++ "vrmlaldavh.<supf>32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2554,7 +2554,7 @@ (define_insn "mve_vbicq_m_n_<supf><mode>"
+ VBICQ_M_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vbict.i%#<V_sz_elem> %q0, %2"
++ "vpst\;vbict.i%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -2599,7 +2599,7 @@ (define_insn "mve_vcvtq_m_to_f_<supf><mode>"
+ VCVTQ_M_TO_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vcvtt.f%#<V_sz_elem>.<supf>%#<V_sz_elem> %q0, %q2"
++ "vpst\;vcvtt.f%#<V_sz_elem>.<supf>%#<V_sz_elem>\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -2644,7 +2644,7 @@ (define_insn "mve_vrmlaldavhaq_<supf>v4si"
+ VRMLALDAVHAQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlaldavha.<supf>32 %Q0, %R0, %q2, %q3"
++ "vrmlaldavha.<supf>32\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -2707,7 +2707,7 @@ (define_insn "mve_vshlcq_<supf><mode>"
+ (match_dup 4)]
+ VSHLCQ))]
+ "TARGET_HAVE_MVE"
+- "vshlc %q0, %1, %4")
++ "vshlc\t%q0, %1, %4")
+
+ ;;
+ ;; [vabsq_m_s])
+@@ -3041,7 +3041,7 @@ (define_insn "mve_vdupq_m_n_<supf><mode>"
+ VDUPQ_M_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vdupt.%#<V_sz_elem> %q0, %2"
++ "vpst\;vdupt.%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3265,7 +3265,7 @@ (define_insn "mve_vmvnq_m_<supf><mode>"
+ VMVNQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmvnt %q0, %q2"
++ "vpst\;vmvnt\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3297,7 +3297,7 @@ (define_insn "@mve_vpselq_<supf><mode>"
+ VPSELQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpsel %q0, %q1, %q2"
++ "vpsel\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -3498,7 +3498,7 @@ (define_insn "mve_vqshlq_m_r_<supf><mode>"
+ ;;
+ (define_insn "mve_vrev64q_m_<supf><mode>"
+ [
+- (set (match_operand:MVE_2 0 "s_register_operand" "=w")
++ (set (match_operand:MVE_2 0 "s_register_operand" "=&w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:<MVE_VPRED> 3 "vpr_register_operand" "Up")]
+@@ -3712,7 +3712,7 @@ (define_insn "mve_vaddlvaq_p_<supf>v4si"
+ VADDLVAQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vaddlvat.<supf>32 %Q0, %R0, %q2"
++ "vpst\;vaddlvat.<supf>32\t%Q0, %R0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -3922,7 +3922,7 @@ (define_insn "mve_vcvtbq_m_f16_f32v8hf"
+ VCVTBQ_M_F16_F32))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vcvtbt.f16.f32 %q0, %q2"
++ "vpst\;vcvtbt.f16.f32\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3938,7 +3938,7 @@ (define_insn "mve_vcvtbq_m_f32_f16v4sf"
+ VCVTBQ_M_F32_F16))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vcvtbt.f32.f16 %q0, %q2"
++ "vpst\;vcvtbt.f32.f16\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3954,7 +3954,7 @@ (define_insn "mve_vcvttq_m_f16_f32v8hf"
+ VCVTTQ_M_F16_F32))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vcvttt.f16.f32 %q0, %q2"
++ "vpst\;vcvttt.f16.f32\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3970,7 +3970,7 @@ (define_insn "mve_vcvttq_m_f32_f16v4sf"
+ VCVTTQ_M_F32_F16))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vcvttt.f32.f16 %q0, %q2"
++ "vpst\;vcvttt.f32.f16\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -3986,7 +3986,7 @@ (define_insn "mve_vdupq_m_n_f<mode>"
+ VDUPQ_M_N_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vdupt.%#<V_sz_elem> %q0, %2"
++ "vpst\;vdupt.%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4158,7 +4158,7 @@ (define_insn "mve_vmlaldavaq_<supf><mode>"
+ VMLALDAVAQ))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlaldava.<supf>%#<V_sz_elem> %Q0, %R0, %q2, %q3"
++ "vmlaldava.<supf>%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4174,7 +4174,7 @@ (define_insn "mve_vmlaldavaxq_s<mode>"
+ VMLALDAVAXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlaldavax.s%#<V_sz_elem> %Q0, %R0, %q2, %q3"
++ "vmlaldavax.s%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4190,7 +4190,7 @@ (define_insn "mve_vmlaldavq_p_<supf><mode>"
+ VMLALDAVQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmlaldavt.<supf>%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vpst\;vmlaldavt.<supf>%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4221,7 +4221,7 @@ (define_insn "mve_vmlsldavaq_s<mode>"
+ VMLSLDAVAQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlsldava.s%#<V_sz_elem> %Q0, %R0, %q2, %q3"
++ "vmlsldava.s%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4237,7 +4237,7 @@ (define_insn "mve_vmlsldavaxq_s<mode>"
+ VMLSLDAVAXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vmlsldavax.s%#<V_sz_elem> %Q0, %R0, %q2, %q3"
++ "vmlsldavax.s%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4253,7 +4253,7 @@ (define_insn "mve_vmlsldavq_p_s<mode>"
+ VMLSLDAVQ_P_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmlsldavt.s%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vpst\;vmlsldavt.s%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4269,7 +4269,7 @@ (define_insn "mve_vmlsldavxq_p_s<mode>"
+ VMLSLDAVXQ_P_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmlsldavxt.s%#<V_sz_elem> %Q0, %R0, %q1, %q2"
++ "vpst\;vmlsldavxt.s%#<V_sz_elem>\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -4346,7 +4346,7 @@ (define_insn "mve_vmvnq_m_n_<supf><mode>"
+ VMVNQ_M_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmvnt.i%#<V_sz_elem> %q0, %2"
++ "vpst\;vmvnt.i%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -4377,7 +4377,7 @@ (define_insn "mve_vorrq_m_n_<supf><mode>"
+ VORRQ_M_N))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vorrt.i%#<V_sz_elem> %q0, %2"
++ "vpst\;vorrt.i%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+ ;;
+@@ -4392,7 +4392,7 @@ (define_insn "@mve_vpselq_f<mode>"
+ VPSELQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpsel %q0, %q1, %q2"
++ "vpsel\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4568,7 +4568,7 @@ (define_insn "mve_vrev32q_m_fv8hf"
+ VREV32Q_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vrev32t.16 %q0, %q2"
++ "vpst\;vrev32t.16\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4584,7 +4584,7 @@ (define_insn "mve_vrev32q_m_<supf><mode>"
+ VREV32Q_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrev32t.%#<V_sz_elem> %q0, %q2"
++ "vpst\;vrev32t.%#<V_sz_elem>\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4593,14 +4593,14 @@ (define_insn "mve_vrev32q_m_<supf><mode>"
+ ;;
+ (define_insn "mve_vrev64q_m_f<mode>"
+ [
+- (set (match_operand:MVE_0 0 "s_register_operand" "=w")
++ (set (match_operand:MVE_0 0 "s_register_operand" "=&w")
+ (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+ (match_operand:MVE_0 2 "s_register_operand" "w")
+ (match_operand:<MVE_VPRED> 3 "vpr_register_operand" "Up")]
+ VREV64Q_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vrev64t.%#<V_sz_elem> %q0, %q2"
++ "vpst\;vrev64t.%#<V_sz_elem>\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4616,7 +4616,7 @@ (define_insn "mve_vrmlaldavhaxq_sv4si"
+ VRMLALDAVHAXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlaldavhax.s32 %Q0, %R0, %q2, %q3"
++ "vrmlaldavhax.s32\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4632,7 +4632,7 @@ (define_insn "mve_vrmlaldavhxq_p_sv4si"
+ VRMLALDAVHXQ_P_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrmlaldavhxt.s32 %Q0, %R0, %q1, %q2"
++ "vpst\;vrmlaldavhxt.s32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4648,7 +4648,7 @@ (define_insn "mve_vrmlsldavhaxq_sv4si"
+ VRMLSLDAVHAXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlsldavhax.s32 %Q0, %R0, %q2, %q3"
++ "vrmlsldavhax.s32\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4664,7 +4664,7 @@ (define_insn "mve_vrmlsldavhq_p_sv4si"
+ VRMLSLDAVHQ_P_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrmlsldavht.s32 %Q0, %R0, %q1, %q2"
++ "vpst\;vrmlsldavht.s32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4680,7 +4680,7 @@ (define_insn "mve_vrmlsldavhxq_p_sv4si"
+ VRMLSLDAVHXQ_P_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrmlsldavhxt.s32 %Q0, %R0, %q1, %q2"
++ "vpst\;vrmlsldavhxt.s32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4905,7 +4905,7 @@ (define_insn "mve_vrev16q_m_<supf>v16qi"
+ VREV16Q_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrev16t.8 %q0, %q2"
++ "vpst\;vrev16t.8\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4937,7 +4937,7 @@ (define_insn "mve_vrmlaldavhq_p_<supf>v4si"
+ VRMLALDAVHQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vrmlaldavht.<supf>32 %Q0, %R0, %q1, %q2"
++ "vpst\;vrmlaldavht.<supf>32\t%Q0, %R0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -4953,7 +4953,7 @@ (define_insn "mve_vrmlsldavhaq_sv4si"
+ VRMLSLDAVHAQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+- "vrmlsldavha.s32 %Q0, %R0, %q2, %q3"
++ "vrmlsldavha.s32\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -4972,7 +4972,7 @@ (define_insn "mve_vabavq_p_<supf><mode>"
+ "TARGET_HAVE_MVE"
+ "vpst\;vabavt.<supf>%#<V_sz_elem>\t%0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+-])
++ (set_attr "length" "8")])
+
+ ;;
+ ;; [vqshluq_m_n_s])
+@@ -4988,7 +4988,8 @@ (define_insn "mve_vqshluq_m_n_s<mode>"
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\n\tvqshlut.s%#<V_sz_elem>\t%q0, %q2, %3"
+- [(set_attr "type" "mve_move")])
++ [(set_attr "type" "mve_move")
++ (set_attr "length" "8")])
+
+ ;;
+ ;; [vshlq_m_s, vshlq_m_u])
+@@ -5004,7 +5005,8 @@ (define_insn "mve_vshlq_m_<supf><mode>"
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vshlt.<supf>%#<V_sz_elem>\t%q0, %q2, %q3"
+- [(set_attr "type" "mve_move")])
++ [(set_attr "type" "mve_move")
++ (set_attr "length" "8")])
+
+ ;;
+ ;; [vsriq_m_n_s, vsriq_m_n_u])
+@@ -5020,7 +5022,8 @@ (define_insn "mve_vsriq_m_n_<supf><mode>"
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vsrit.%#<V_sz_elem>\t%q0, %q2, %3"
+- [(set_attr "type" "mve_move")])
++ [(set_attr "type" "mve_move")
++ (set_attr "length" "8")])
+
+ ;;
+ ;; [vsubq_m_u, vsubq_m_s])
+@@ -5036,7 +5039,8 @@ (define_insn "mve_vsubq_m_<supf><mode>"
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %q3"
+- [(set_attr "type" "mve_move")])
++ [(set_attr "type" "mve_move")
++ (set_attr "length" "8")])
+
+ ;;
+ ;; [vcvtq_m_n_to_f_u, vcvtq_m_n_to_f_s])
+@@ -5118,7 +5122,7 @@ (define_insn "mve_vandq_m_<supf><mode>"
+ VANDQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vandt %q0, %q2, %q3"
++ "vpst\;vandt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -5135,7 +5139,7 @@ (define_insn "mve_vbicq_m_<supf><mode>"
+ VBICQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vbict %q0, %q2, %q3"
++ "vpst\;vbict\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -5203,7 +5207,7 @@ (define_insn "mve_veorq_m_<supf><mode>"
+ VEORQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;veort %q0, %q2, %q3"
++ "vpst\;veort\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -5458,7 +5462,7 @@ (define_insn "mve_vornq_m_<supf><mode>"
+ VORNQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vornt %q0, %q2, %q3"
++ "vpst\;vornt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -5475,7 +5479,7 @@ (define_insn "mve_vorrq_m_<supf><mode>"
+ VORRQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vorrt %q0, %q2, %q3"
++ "vpst\;vorrt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -6121,7 +6125,7 @@ (define_insn "mve_vmlaldavaxq_p_<supf><mode>"
+ VMLALDAVAXQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;vmlaldavaxt.<supf>%#<V_sz_elem> %Q0, %R0, %q2, %q3"
++ "vpst\;vmlaldavaxt.<supf>%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -6647,7 +6651,7 @@ (define_insn "mve_vandq_m_f<mode>"
+ VANDQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vandt %q0, %q2, %q3"
++ "vpst\;vandt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -6664,7 +6668,7 @@ (define_insn "mve_vbicq_m_f<mode>"
+ VBICQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vbict %q0, %q2, %q3"
++ "vpst\;vbict\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -6868,7 +6872,7 @@ (define_insn "mve_veorq_m_f<mode>"
+ VEORQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;veort %q0, %q2, %q3"
++ "vpst\;veort\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -7021,7 +7025,7 @@ (define_insn "mve_vornq_m_f<mode>"
+ VORNQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vornt %q0, %q2, %q3"
++ "vpst\;vornt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -7038,7 +7042,7 @@ (define_insn "mve_vorrq_m_f<mode>"
+ VORRQ_M_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vpst\;vorrt %q0, %q2, %q3"
++ "vpst\;vorrt\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -7265,15 +7269,13 @@ (define_insn "mve_vstrwq_scatter_base_p_<supf>v4si"
+ }
+ [(set_attr "length" "8")])
+
+-;;
+-;; [vstrbq_p_s vstrbq_p_u]
+-;;
+ (define_insn "mve_vstrbq_p_<supf><mode>"
+ [(set (match_operand:<MVE_B_ELEM> 0 "mve_memory_operand" "=Ux")
+- (unspec:<MVE_B_ELEM> [(match_operand:MVE_2 1 "s_register_operand" "w")
+- (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")]
+- VSTRBQ))
+- ]
++ (unspec:<MVE_B_ELEM>
++ [(match_operand:MVE_2 1 "s_register_operand" "w")
++ (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")
++ (match_dup 0)]
++ VSTRBQ))]
+ "TARGET_HAVE_MVE"
+ {
+ rtx ops[2];
+@@ -8072,10 +8074,11 @@ (define_insn "mve_vstrhq_fv8hf"
+ ;;
+ (define_insn "mve_vstrhq_p_fv8hf"
+ [(set (match_operand:V8HI 0 "mve_memory_operand" "=Ux")
+- (unspec:V8HI [(match_operand:V8HF 1 "s_register_operand" "w")
+- (match_operand:V8BI 2 "vpr_register_operand" "Up")]
+- VSTRHQ_F))
+- ]
++ (unspec:V8HI
++ [(match_operand:V8HF 1 "s_register_operand" "w")
++ (match_operand:V8BI 2 "vpr_register_operand" "Up")
++ (match_dup 0)]
++ VSTRHQ_F))]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+ {
+ rtx ops[2];
+@@ -8092,8 +8095,10 @@ (define_insn "mve_vstrhq_p_fv8hf"
+ ;;
+ (define_insn "mve_vstrhq_p_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "mve_memory_operand" "=Ux")
+- (unspec:<MVE_H_ELEM> [(match_operand:MVE_6 1 "s_register_operand" "w")
+- (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")]
++ (unspec:<MVE_H_ELEM>
++ [(match_operand:MVE_6 1 "s_register_operand" "w")
++ (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")
++ (match_dup 0)]
+ VSTRHQ))
+ ]
+ "TARGET_HAVE_MVE"
+@@ -8271,10 +8276,11 @@ (define_insn "mve_vstrwq_fv4sf"
+ ;;
+ (define_insn "mve_vstrwq_p_fv4sf"
+ [(set (match_operand:V4SI 0 "mve_memory_operand" "=Ux")
+- (unspec:V4SI [(match_operand:V4SF 1 "s_register_operand" "w")
+- (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")]
+- VSTRWQ_F))
+- ]
++ (unspec:V4SI
++ [(match_operand:V4SF 1 "s_register_operand" "w")
++ (match_operand:<MVE_VPRED> 2 "vpr_register_operand" "Up")
++ (match_dup 0)]
++ VSTRWQ_F))]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+ {
+ rtx ops[2];
+@@ -8291,10 +8297,11 @@ (define_insn "mve_vstrwq_p_fv4sf"
+ ;;
+ (define_insn "mve_vstrwq_p_<supf>v4si"
+ [(set (match_operand:V4SI 0 "mve_memory_operand" "=Ux")
+- (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w")
+- (match_operand:V4BI 2 "vpr_register_operand" "Up")]
+- VSTRWQ))
+- ]
++ (unspec:V4SI
++ [(match_operand:V4SI 1 "s_register_operand" "w")
++ (match_operand:V4BI 2 "vpr_register_operand" "Up")
++ (match_dup 0)]
++ VSTRWQ))]
+ "TARGET_HAVE_MVE"
+ {
+ rtx ops[2];
+@@ -8478,7 +8485,7 @@ (define_insn "mve_vstrdq_scatter_shifted_offset_p_<supf>v2di_insn"
+ (match_operand:HI 3 "vpr_register_operand" "Up")]
+ VSTRDSSOQ))]
+ "TARGET_HAVE_MVE"
+- "vpst\;vstrdt.64\t%q2, [%0, %q1, UXTW #3]"
++ "vpst\;vstrdt.64\t%q2, [%0, %q1, uxtw #3]"
+ [(set_attr "length" "8")])
+
+ ;;
+@@ -8507,7 +8514,7 @@ (define_insn "mve_vstrdq_scatter_shifted_offset_<supf>v2di_insn"
+ (match_operand:V2DI 2 "s_register_operand" "w")]
+ VSTRDSSOQ))]
+ "TARGET_HAVE_MVE"
+- "vstrd.64\t%q2, [%0, %q1, UXTW #3]"
++ "vstrd.64\t%q2, [%0, %q1, uxtw #3]"
+ [(set_attr "length" "4")])
+
+ ;;
+@@ -8923,7 +8930,7 @@ (define_insn "mve_vaddq<mode>"
+ (match_operand:MVE_2 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE"
+- "vadd.i%#<V_sz_elem> %q0, %q1, %q2"
++ "vadd.i%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -8937,7 +8944,7 @@ (define_insn "mve_vaddq_f<mode>"
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+- "vadd.f%#<V_sz_elem> %q0, %q1, %q2"
++ "vadd.f%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+ ])
+
+@@ -9038,7 +9045,7 @@ (define_insn "mve_vddupq_u<mode>_insn"
+ (minus:SI (match_dup 2)
+ (match_operand:SI 4 "immediate_operand" "i")))]
+ "TARGET_HAVE_MVE"
+- "vddup.u%#<V_sz_elem> %q0, %1, %3")
++ "vddup.u%#<V_sz_elem>\t%q0, %1, %3")
+
+ ;;
+ ;; [vddupq_m_n_u])
+@@ -9074,7 +9081,7 @@ (define_insn "mve_vddupq_m_wb_u<mode>_insn"
+ (minus:SI (match_dup 3)
+ (match_operand:SI 6 "immediate_operand" "i")))]
+ "TARGET_HAVE_MVE"
+- "vpst\;\tvddupt.u%#<V_sz_elem>\t%q0, %2, %4"
++ "vpst\;vddupt.u%#<V_sz_elem>\t%q0, %2, %4"
+ [(set_attr "length""8")])
+
+ ;;
+@@ -9190,7 +9197,7 @@ (define_insn "mve_vdwdupq_m_wb_u<mode>_insn"
+ VDWDUPQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+- "vpst\;\tvdwdupt.u%#<V_sz_elem>\t%q2, %3, %R4, %5"
++ "vpst\;vdwdupt.u%#<V_sz_elem>\t%q2, %3, %R4, %5"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+@@ -9345,7 +9352,7 @@ (define_insn "mve_vstrwq_scatter_base_wb_p_<supf>v4si"
+ [(match_operand:V4SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "mve_vldrd_immediate" "Ri")
+ (match_operand:V4SI 3 "s_register_operand" "w")
+- (match_operand:V4BI 4 "vpr_register_operand")]
++ (match_operand:V4BI 4 "vpr_register_operand" "Up")]
+ VSTRWSBWBQ))
+ (set (match_operand:V4SI 0 "s_register_operand" "=w")
+ (unspec:V4SI [(match_dup 1) (match_dup 2)]
+@@ -9394,9 +9401,9 @@ (define_insn "mve_vstrwq_scatter_base_wb_p_fv4sf"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(match_operand:V4SI 1 "s_register_operand" "0")
+- (match_operand:SI 2 "mve_vldrd_immediate" "Ri")
++ (match_operand:SI 2 "mve_vstrw_immediate" "Rl")
+ (match_operand:V4SF 3 "s_register_operand" "w")
+- (match_operand:V4BI 4 "vpr_register_operand")]
++ (match_operand:V4BI 4 "vpr_register_operand" "Up")]
+ VSTRWQSBWB_F))
+ (set (match_operand:V4SI 0 "s_register_operand" "=w")
+ (unspec:V4SI [(match_dup 1) (match_dup 2)]
+@@ -9408,7 +9415,7 @@ (define_insn "mve_vstrwq_scatter_base_wb_p_fv4sf"
+ ops[0] = operands[1];
+ ops[1] = operands[2];
+ ops[2] = operands[3];
+- output_asm_insn ("vpst\;\tvstrwt.u32\t%q2, [%q0, %1]!",ops);
++ output_asm_insn ("vpst\;vstrwt.u32\t%q2, [%q0, %1]!",ops);
+ return "";
+ }
+ [(set_attr "length" "8")])
+@@ -9447,7 +9454,7 @@ (define_insn "mve_vstrdq_scatter_base_wb_p_<supf>v2di"
+ [(match_operand:V2DI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "mve_vldrd_immediate" "Ri")
+ (match_operand:V2DI 3 "s_register_operand" "w")
+- (match_operand:HI 4 "vpr_register_operand")]
++ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VSTRDSBWBQ))
+ (set (match_operand:V2DI 0 "s_register_operand" "=w")
+ (unspec:V2DI [(match_dup 1) (match_dup 2)]
+@@ -9459,7 +9466,7 @@ (define_insn "mve_vstrdq_scatter_base_wb_p_<supf>v2di"
+ ops[0] = operands[1];
+ ops[1] = operands[2];
+ ops[2] = operands[3];
+- output_asm_insn ("vpst;vstrdt.u64\t%q2, [%q0, %1]!",ops);
++ output_asm_insn ("vpst\;vstrdt.u64\t%q2, [%q0, %1]!",ops);
+ return "";
+ }
+ [(set_attr "length" "8")])
+@@ -9768,7 +9775,7 @@ (define_expand "mve_vldrdq_gather_base_nowb_z_<supf>v2di"
+
+ (define_insn "get_fpscr_nzcvqc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+- (unspec:SI [(reg:SI VFPCC_REGNUM)] UNSPEC_GET_FPSCR_NZCVQC))]
++ (unspec_volatile:SI [(reg:SI VFPCC_REGNUM)] UNSPEC_GET_FPSCR_NZCVQC))]
+ "TARGET_HAVE_MVE"
+ "vmrs\\t%0, FPSCR_nzcvqc"
+ [(set_attr "type" "mve_move")])
+--- a/src/gcc/config/arm/predicates.md
++++ b/src/gcc/config/arm/predicates.md
+@@ -73,13 +73,13 @@ (define_predicate "mve_imm_32"
+ (define_predicate "mve_imm_selective_upto_8"
+ (match_test "satisfies_constraint_Rg (op)"))
+
+-;; True if the immediate is the range +/- 1016 and multiple of 8 for MVE.
+-(define_constraint "Ri"
+- "@internal In Thumb-2 state a constant is multiple of 8 and in range
+- of -/+ 1016 for MVE"
+- (and (match_code "const_int")
+- (match_test "TARGET_HAVE_MVE && (-1016 <= ival) && (ival <= 1016)
+- && ((ival % 8) == 0)")))
++;; True if the immediate is multiple of 8 and in range of -/+ 1016 for MVE.
++(define_predicate "mve_vldrd_immediate"
++ (match_test "satisfies_constraint_Ri (op)"))
++
++;; True if the immediate is multiple of 2 and in range of -/+ 252 for MVE.
++(define_predicate "mve_vstrw_immediate"
++ (match_test "satisfies_constraint_Rl (op)"))
+
+ ; Predicate for stack protector guard's address in
+ ; stack_protect_combined_set_insn and stack_protect_combined_test_insn patterns
+--- a/src/gcc/config/arm/vfp.md
++++ b/src/gcc/config/arm/vfp.md
+@@ -105,9 +105,9 @@ (define_insn "*thumb2_movhi_vfp"
+ case 8:
+ return "vmov%?.f32\t%0, %1\t%@ int";
+ case 9:
+- return "vmsr%?\t P0, %1\t@ movhi";
++ return "vmsr%?\tp0, %1\t@ movhi";
+ case 10:
+- return "vmrs%?\t %0, P0\t@ movhi";
++ return "vmrs%?\t%0, p0\t@ movhi";
+ default:
+ gcc_unreachable ();
+ }
+@@ -209,9 +209,9 @@ (define_insn "*thumb2_movhi_fp16"
+ case 8:
+ return "vmov%?.f32\t%0, %1\t%@ int";
+ case 9:
+- return "vmsr%?\t P0, %1\t%@ movhi";
++ return "vmsr%?\tp0, %1\t%@ movhi";
+ case 10:
+- return "vmrs%?\t%0, P0\t%@ movhi";
++ return "vmrs%?\t%0, p0\t%@ movhi";
+ default:
+ gcc_unreachable ();
+ }
+@@ -312,9 +312,9 @@ (define_insn "*thumb2_movsi_vfp"
+ case 12: case 13:
+ return output_move_vfp (operands);
+ case 14:
+- return \"vmsr\\t P0, %1\";
++ return \"vmsr\\tp0, %1\";
+ case 15:
+- return \"vmrs\\t %0, P0\";
++ return \"vmrs\\t%0, p0\";
+ case 16:
+ return \"mcr\\tp10, 7, %1, cr1, cr0, 0\\t @SET_FPSCR\";
+ case 17:
+@@ -2138,7 +2138,7 @@ (define_insn "get_fpscr"
+ (define_insn_and_split "no_literal_pool_df_immediate"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (match_operand:DF 1 "const_double_operand" "F"))
+- (clobber (match_operand:DF 2 "s_register_operand" "=r"))]
++ (clobber (match_operand:DI 2 "s_register_operand" "=r"))]
+ "arm_disable_literal_pool
+ && TARGET_VFP_BASE
+ && !arm_const_double_rtx (operands[1])
+@@ -2153,8 +2153,9 @@ (define_insn_and_split "no_literal_pool_df_immediate"
+ unsigned HOST_WIDE_INT ival = zext_hwi (buf[order], 32);
+ ival |= (zext_hwi (buf[1 - order], 32) << 32);
+ rtx cst = gen_int_mode (ival, DImode);
+- emit_move_insn (simplify_gen_subreg (DImode, operands[2], DFmode, 0), cst);
+- emit_move_insn (operands[0], operands[2]);
++ emit_move_insn (operands[2], cst);
++ emit_move_insn (operands[0],
++ simplify_gen_subreg (DFmode, operands[2], DImode, 0));
+ DONE;
+ }
+ )
+--- a/src/gcc/config/avr/avr-dimode.md
++++ b/src/gcc/config/avr/avr-dimode.md
+@@ -455,12 +455,18 @@ (define_expand "conditional_jump"
+ (define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ALL8 1 "register_operand" "")
+- (match_operand:ALL8 2 "nonmemory_operand" "")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))]
++ [(match_operand:ALL8 1 "register_operand")
++ (match_operand:ALL8 2 "nonmemory_operand")])
++ (label_ref (match_operand 3))
++ (pc)))]
+ "avr_have_dimode"
+ {
++ int icode = (int) GET_CODE (operands[0]);
++
++ targetm.canonicalize_comparison (&icode, &operands[1], &operands[2], false);
++ operands[0] = gen_rtx_fmt_ee ((enum rtx_code) icode,
++ VOIDmode, operands[1], operands[2]);
++
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ avr_fix_inputs (operands, 1 << 2, regmask (<MODE>mode, ACC_A));
+@@ -490,8 +496,8 @@ (define_insn_and_split "cbranch_<mode>2_split"
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(reg:ALL8 ACC_A)
+ (reg:ALL8 ACC_B)])
+- (label_ref (match_operand 1 "" ""))
+- (pc)))]
++ (label_ref (match_operand 1))
++ (pc)))]
+ "avr_have_dimode"
+ "#"
+ "&& reload_completed"
+@@ -544,8 +550,8 @@ (define_insn_and_split "cbranch_const_<mode>2_split"
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(reg:ALL8 ACC_A)
+ (match_operand:ALL8 1 "const_operand" "n Ynn")])
+- (label_ref (match_operand 2 "" ""))
+- (pc)))
++ (label_ref (match_operand 2 "" ""))
++ (pc)))
+ (clobber (match_scratch:QI 3 "=&d"))]
+ "avr_have_dimode
+ && !s8_operand (operands[1], VOIDmode)"
+--- a/src/gcc/config/avr/avr-passes.def
++++ b/src/gcc/config/avr/avr-passes.def
+@@ -43,3 +43,23 @@ INSERT_PASS_BEFORE (pass_free_cfg, 1, avr_pass_recompute_notes);
+ insns withaout any insns in between. */
+
+ INSERT_PASS_AFTER (pass_expand, 1, avr_pass_casesi);
++
++/* If-else decision trees generated for switch / case may produce sequences
++ like
++
++ SREG = compare (reg, val);
++ if (SREG == 0) goto label1;
++ SREG = compare (reg, 1 + val);
++ if (SREG >= 0) goto label2;
++
++ which can be optimized to
++
++ SREG = compare (reg, val);
++ if (SREG == 0) goto label1;
++ if (SREG >= 0) goto label2;
++
++ The optimal place for such a pass would be directly after expand, but
++ it's not possible for a jump insn to target more than one code label.
++ Hence, run a mini pass right before split2 which introduces REG_CC. */
++
++INSERT_PASS_BEFORE (pass_split_after_reload, 1, avr_pass_ifelse);
+--- a/src/gcc/config/avr/avr-protos.h
++++ b/src/gcc/config/avr/avr-protos.h
+@@ -58,6 +58,8 @@ extern const char *ret_cond_branch (rtx x, int len, int reverse);
+ extern const char *avr_out_movpsi (rtx_insn *, rtx*, int*);
+ extern const char *avr_out_sign_extend (rtx_insn *, rtx*, int*);
+ extern const char *avr_out_insert_notbit (rtx_insn *, rtx*, rtx, int*);
++extern const char *avr_out_plus_set_ZN (rtx*, int*);
++extern const char *avr_out_cmp_ext (rtx*, enum rtx_code, int*);
+
+ extern const char *ashlqi3_out (rtx_insn *insn, rtx operands[], int *len);
+ extern const char *ashlhi3_out (rtx_insn *insn, rtx operands[], int *len);
+@@ -112,8 +114,6 @@ extern int jump_over_one_insn_p (rtx_insn *insn, rtx dest);
+
+ extern void avr_final_prescan_insn (rtx_insn *insn, rtx *operand,
+ int num_operands);
+-extern int avr_simplify_comparison_p (machine_mode mode,
+- RTX_CODE op, rtx x);
+ extern RTX_CODE avr_normalize_condition (RTX_CODE condition);
+ extern void out_shift_with_cnt (const char *templ, rtx_insn *insn,
+ rtx operands[], int *len, int t_len);
+@@ -145,6 +145,7 @@ extern rtx tmp_reg_rtx;
+ extern rtx zero_reg_rtx;
+ extern rtx all_regs_rtx[32];
+ extern rtx rampz_rtx;
++extern rtx cc_reg_rtx;
+
+ #endif /* RTX_CODE */
+
+@@ -160,6 +161,7 @@ class rtl_opt_pass;
+ extern rtl_opt_pass *make_avr_pass_pre_proep (gcc::context *);
+ extern rtl_opt_pass *make_avr_pass_recompute_notes (gcc::context *);
+ extern rtl_opt_pass *make_avr_pass_casesi (gcc::context *);
++extern rtl_opt_pass *make_avr_pass_ifelse (gcc::context *);
+
+ /* From avr-log.cc */
+
+--- a/src/gcc/config/avr/avr.cc
++++ b/src/gcc/config/avr/avr.cc
+@@ -359,6 +359,41 @@ public:
+ }
+ }; // avr_pass_casesi
+
++
++static const pass_data avr_pass_data_ifelse =
++{
++ RTL_PASS, // type
++ "", // name (will be patched)
++ OPTGROUP_NONE, // optinfo_flags
++ TV_DF_SCAN, // tv_id
++ 0, // properties_required
++ 0, // properties_provided
++ 0, // properties_destroyed
++ 0, // todo_flags_start
++ TODO_df_finish | TODO_df_verify // todo_flags_finish
++};
++
++class avr_pass_ifelse : public rtl_opt_pass
++{
++public:
++ avr_pass_ifelse (gcc::context *ctxt, const char *name)
++ : rtl_opt_pass (avr_pass_data_ifelse, ctxt)
++ {
++ this->name = name;
++ }
++
++ void avr_rest_of_handle_ifelse (function*);
++
++ virtual bool gate (function*) { return optimize > 0; }
++
++ virtual unsigned int execute (function *func)
++ {
++ avr_rest_of_handle_ifelse (func);
++
++ return 0;
++ }
++}; // avr_pass_ifelse
++
+ } // anon namespace
+
+ rtl_opt_pass*
+@@ -373,6 +408,12 @@ make_avr_pass_casesi (gcc::context *ctxt)
+ return new avr_pass_casesi (ctxt, "avr-casesi");
+ }
+
++rtl_opt_pass*
++make_avr_pass_ifelse (gcc::context *ctxt)
++{
++ return new avr_pass_ifelse (ctxt, "avr-ifelse");
++}
++
+
+ /* Make one parallel insn with all the patterns from insns i[0]..i[5]. */
+
+@@ -686,6 +727,304 @@ avr_pass_casesi::avr_rest_of_handle_casesi (function *func)
+ }
+
+
++/* A helper for the next method. Suppose we have two conditional branches
++
++ if (reg <cond1> xval1) goto label1;
++ if (reg <cond2> xval2) goto label2;
++
++ If the second comparison is redundant and there is a code <cond> such
++ that the sequence can be performed as
++
++ REG_CC = compare (reg, xval1);
++ if (REG_CC <cond1> 0) goto label1;
++ if (REG_CC <cond> 0) goto label2;
++
++ then return <cond>. Otherwise, return UNKNOWN.
++ xval1 and xval2 are CONST_INT, and mode is the scalar int mode in which
++ the comparison will be carried out. reverse_cond1 can be set to reverse
++ condition cond1. This is useful if the second comparison does not follow
++ the first one, but is located after label1 like in:
++
++ if (reg <cond1> xval1) goto label1;
++ ...
++ label1:
++ if (reg <cond2> xval2) goto label2; */
++
++static enum rtx_code
++avr_redundant_compare (enum rtx_code cond1, rtx xval1,
++ enum rtx_code cond2, rtx xval2,
++ machine_mode mode, bool reverse_cond1)
++{
++ HOST_WIDE_INT ival1 = INTVAL (xval1);
++ HOST_WIDE_INT ival2 = INTVAL (xval2);
++
++ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
++ unsigned HOST_WIDE_INT uval1 = mask & UINTVAL (xval1);
++ unsigned HOST_WIDE_INT uval2 = mask & UINTVAL (xval2);
++
++ if (reverse_cond1)
++ cond1 = reverse_condition (cond1);
++
++ if (cond1 == EQ)
++ {
++ ////////////////////////////////////////////////
++ // A sequence like
++ // if (reg == val) goto label1;
++ // if (reg > val) goto label2;
++ // can be re-written using the same, simple comparison like in:
++ // REG_CC = compare (reg, val)
++ // if (REG_CC == 0) goto label1;
++ // if (REG_CC >= 0) goto label2;
++ if (ival1 == ival2
++ && (cond2 == GT || cond2 == GTU))
++ return avr_normalize_condition (cond2);
++
++ // Similar, but the input sequence is like
++ // if (reg == val) goto label1;
++ // if (reg >= val) goto label2;
++ if (ival1 == ival2
++ && (cond2 == GE || cond2 == GEU))
++ return cond2;
++
++ // Similar, but the input sequence is like
++ // if (reg == val) goto label1;
++ // if (reg >= val + 1) goto label2;
++ if ((cond2 == GE && ival2 == 1 + ival1)
++ || (cond2 == GEU && uval2 == 1 + uval1))
++ return cond2;
++
++ // Similar, but the input sequence is like
++ // if (reg == val) goto label1;
++ // if (reg > val - 1) goto label2;
++ if ((cond2 == GT && ival2 == ival1 - 1)
++ || (cond2 == GTU && uval2 == uval1 - 1))
++ return avr_normalize_condition (cond2);
++
++ /////////////////////////////////////////////////////////
++ // A sequence like
++ // if (reg == val) goto label1;
++ // if (reg < 1 + val) goto label2;
++ // can be re-written as
++ // REG_CC = compare (reg, val)
++ // if (REG_CC == 0) goto label1;
++ // if (REG_CC < 0) goto label2;
++ if ((cond2 == LT && ival2 == 1 + ival1)
++ || (cond2 == LTU && uval2 == 1 + uval1))
++ return cond2;
++
++ // Similar, but with an input sequence like
++ // if (reg == val) goto label1;
++ // if (reg <= val) goto label2;
++ if (ival1 == ival2
++ && (cond2 == LE || cond2 == LEU))
++ return avr_normalize_condition (cond2);
++
++ // Similar, but with an input sequence like
++ // if (reg == val) goto label1;
++ // if (reg < val) goto label2;
++ if (ival1 == ival2
++ && (cond2 == LT || cond2 == LTU))
++ return cond2;
++
++ // Similar, but with an input sequence like
++ // if (reg == val) goto label1;
++ // if (reg <= val - 1) goto label2;
++ if ((cond2 == LE && ival2 == ival1 - 1)
++ || (cond2 == LEU && uval2 == uval1 - 1))
++ return avr_normalize_condition (cond2);
++
++ } // cond1 == EQ
++
++ return UNKNOWN;
++}
++
++
++/* If-else decision trees generated for switch / case may produce sequences
++ like
++
++ SREG = compare (reg, val);
++ if (SREG == 0) goto label1;
++ SREG = compare (reg, 1 + val);
++ if (SREG >= 0) goto label2;
++
++ which can be optimized to
++
++ SREG = compare (reg, val);
++ if (SREG == 0) goto label1;
++ if (SREG >= 0) goto label2;
++
++ The optimal place for such a pass would be directly after expand, but
++ it's not possible for a jump insn to target more than one code label.
++ Hence, run a mini pass right before split2 which introduces REG_CC. */
++
++void
++avr_pass_ifelse::avr_rest_of_handle_ifelse (function*)
++{
++ rtx_insn *next_insn;
++
++ for (rtx_insn *insn = get_insns(); insn; insn = next_insn)
++ {
++ next_insn = next_nonnote_nondebug_insn (insn);
++
++ if (! next_insn)
++ break;
++
++ // Search for two cbranch insns. The first one is a cbranch.
++ // Filter for "cbranch<mode>4_insn" with mode in QI, HI, PSI, SI.
++
++ if (! JUMP_P (insn))
++ continue;
++
++ int icode1 = recog_memoized (insn);
++
++ if (icode1 != CODE_FOR_cbranchqi4_insn
++ && icode1 != CODE_FOR_cbranchhi4_insn
++ && icode1 != CODE_FOR_cbranchpsi4_insn
++ && icode1 != CODE_FOR_cbranchsi4_insn)
++ continue;
++
++ rtx_jump_insn *insn1 = as_a<rtx_jump_insn *> (insn);
++ rtx_jump_insn *insn2 = nullptr;
++ bool follow_label1 = false;
++
++ // Extract the operands of the first insn:
++ // $0 = comparison operator ($1, $2)
++ // $1 = reg
++ // $2 = reg or const_int
++ // $3 = code_label
++ // $4 = optional SCRATCH for HI, PSI, SI cases.
++
++ const auto &op = recog_data.operand;
++
++ extract_insn (insn1);
++ rtx xop1[5] = { op[0], op[1], op[2], op[3], op[4] };
++ int n_operands = recog_data.n_operands;
++
++ // For now, we can optimize cbranches that follow an EQ cbranch,
++ // and cbranches that follow the label of a NE cbranch.
++
++ if (GET_CODE (xop1[0]) == EQ
++ && JUMP_P (next_insn)
++ && recog_memoized (next_insn) == icode1)
++ {
++ // The 2nd cbranch insn follows insn1, i.e. is located in the
++ // fallthrough path of insn1.
++
++ insn2 = as_a<rtx_jump_insn *> (next_insn);
++ }
++ else if (GET_CODE (xop1[0]) == NE)
++ {
++ // insn1 might branch to a label followed by a cbranch.
++
++ rtx target1 = JUMP_LABEL (insn1);
++ rtx_insn *code_label1 = JUMP_LABEL_AS_INSN (insn1);
++ rtx_insn *next = next_nonnote_nondebug_insn (code_label1);
++ rtx_insn *barrier = prev_nonnote_nondebug_insn (code_label1);
++
++ if (// Target label of insn1 is used exactly once and
++ // is not a fallthru, i.e. is preceded by a barrier.
++ LABEL_NUSES (target1) == 1
++ && barrier
++ && BARRIER_P (barrier)
++ // Following the target label is a cbranch of the same kind.
++ && next
++ && JUMP_P (next)
++ && recog_memoized (next) == icode1)
++ {
++ follow_label1 = true;
++ insn2 = as_a<rtx_jump_insn *> (next);
++ }
++ }
++
++ if (! insn2)
++ continue;
++
++ // Also extract operands of insn2, and filter for REG + CONST_INT
++ // comparsons against the same register.
++
++ extract_insn (insn2);
++ rtx xop2[5] = { op[0], op[1], op[2], op[3], op[4] };
++
++ if (! rtx_equal_p (xop1[1], xop2[1])
++ || ! CONST_INT_P (xop1[2])
++ || ! CONST_INT_P (xop2[2]))
++ continue;
++
++ machine_mode mode = GET_MODE (xop1[1]);
++ enum rtx_code code1 = GET_CODE (xop1[0]);
++ enum rtx_code code2 = GET_CODE (xop2[0]);
++
++ code2 = avr_redundant_compare (code1, xop1[2], code2, xop2[2],
++ mode, follow_label1);
++ if (code2 == UNKNOWN)
++ continue;
++
++ //////////////////////////////////////////////////////
++ // Found a replacement.
++
++ if (dump_file)
++ {
++ fprintf (dump_file, "\n;; Found chain of jump_insn %d and"
++ " jump_insn %d, follow_label1=%d:\n",
++ INSN_UID (insn1), INSN_UID (insn2), follow_label1);
++ print_rtl_single (dump_file, PATTERN (insn1));
++ print_rtl_single (dump_file, PATTERN (insn2));
++ }
++
++ if (! follow_label1)
++ next_insn = next_nonnote_nondebug_insn (insn2);
++
++ // Pop the new branch conditions and the new comparison.
++ // Prematurely split into compare + branch so that we can drop
++ // the 2nd comparison. The following pass, split2, splits all
++ // insns for REG_CC, and it should still work as usual even when
++ // there are already some REG_CC insns around.
++
++ rtx xcond1 = gen_rtx_fmt_ee (code1, VOIDmode, cc_reg_rtx, const0_rtx);
++ rtx xcond2 = gen_rtx_fmt_ee (code2, VOIDmode, cc_reg_rtx, const0_rtx);
++ rtx xpat1 = gen_branch (xop1[3], xcond1);
++ rtx xpat2 = gen_branch (xop2[3], xcond2);
++ rtx xcompare = NULL_RTX;
++
++ if (mode == QImode)
++ {
++ gcc_assert (n_operands == 4);
++ xcompare = gen_cmpqi3 (xop1[1], xop1[2]);
++ }
++ else
++ {
++ gcc_assert (n_operands == 5);
++ rtx (*gen_cmp)(rtx,rtx,rtx)
++ = mode == HImode ? gen_gen_comparehi
++ : mode == PSImode ? gen_gen_comparepsi
++ : gen_gen_comparesi; // SImode
++ xcompare = gen_cmp (xop1[1], xop1[2], xop1[4]);
++ }
++
++ // Emit that stuff.
++
++ rtx_insn *cmp = emit_insn_before (xcompare, insn1);
++ rtx_jump_insn *branch1 = emit_jump_insn_before (xpat1, insn1);
++ rtx_jump_insn *branch2 = emit_jump_insn_before (xpat2, insn2);
++
++ JUMP_LABEL (branch1) = xop1[3];
++ JUMP_LABEL (branch2) = xop2[3];
++ // delete_insn() decrements LABEL_NUSES when deleting a JUMP_INSN, but
++ // when we pop a new JUMP_INSN, do it by hand.
++ ++LABEL_NUSES (xop1[3]);
++ ++LABEL_NUSES (xop2[3]);
++
++ delete_insn (insn1);
++ delete_insn (insn2);
++
++ // As a side effect, also recog the new insns.
++ gcc_assert (valid_insn_p (cmp));
++ gcc_assert (valid_insn_p (branch1));
++ gcc_assert (valid_insn_p (branch2));
++ } // loop insns
++}
++
++
+ /* Set `avr_arch' as specified by `-mmcu='.
+ Return true on success. */
+
+@@ -1019,6 +1358,19 @@ avr_no_gccisr_function_p (tree func)
+ return avr_lookup_function_attribute1 (func, "no_gccisr");
+ }
+
++
++/* Implement `TARGET_CAN_INLINE_P'. */
++/* Some options like -mgas_isr_prologues depend on optimization level,
++ and the inliner might think that due to different options, inlining
++ is not permitted; see PR104327. */
++
++static bool
++avr_can_inline_p (tree /* caller */, tree /* callee */)
++{
++ // No restrictions whatsoever.
++ return true;
++}
++
+ /* Implement `TARGET_SET_CURRENT_FUNCTION'. */
+ /* Sanity cheching for above function attributes. */
+
+@@ -3173,28 +3525,6 @@ avr_asm_final_postscan_insn (FILE *stream, rtx_insn *insn, rtx*, int)
+ }
+
+
+-/* Return 0 if undefined, 1 if always true or always false. */
+-
+-int
+-avr_simplify_comparison_p (machine_mode mode, RTX_CODE op, rtx x)
+-{
+- unsigned int max = (mode == QImode ? 0xff :
+- mode == HImode ? 0xffff :
+- mode == PSImode ? 0xffffff :
+- mode == SImode ? 0xffffffff : 0);
+- if (max && op && CONST_INT_P (x))
+- {
+- if (unsigned_condition (op) != op)
+- max >>= 1;
+-
+- if (max != (INTVAL (x) & max)
+- && INTVAL (x) != 0xff)
+- return 1;
+- }
+- return 0;
+-}
+-
+-
+ /* Worker function for `FUNCTION_ARG_REGNO_P'. */
+ /* Returns nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed. */
+@@ -5677,29 +6007,36 @@ avr_frame_pointer_required_p (void)
+ || get_frame_size () > 0);
+ }
+
+-/* Returns the condition of compare insn INSN, or UNKNOWN. */
++
++/* Returns the condition of the branch following INSN, where INSN is some
++ comparison. If the next insn is not a branch or the condition code set
++ by INSN might be used by more insns than the next one, return UNKNOWN.
++ For now, just look at the next insn, which misses some opportunities like
++ following jumps. */
+
+ static RTX_CODE
+ compare_condition (rtx_insn *insn)
+ {
+- rtx_insn *next = next_real_insn (insn);
++ rtx set;
++ rtx_insn *next = next_real_nondebug_insn (insn);
+
+- if (next && JUMP_P (next))
++ if (next
++ && JUMP_P (next)
++ // If SREG does not die in the next insn, it is used in more than one
++ // branch. This can happen due to pass .avr-ifelse optimizations.
++ && dead_or_set_regno_p (next, REG_CC)
++ // Branches are (set (pc) (if_then_else (COND (...)))).
++ && (set = single_set (next))
++ && GET_CODE (SET_SRC (set)) == IF_THEN_ELSE)
+ {
+- rtx pat = PATTERN (next);
+- if (GET_CODE (pat) == PARALLEL)
+- pat = XVECEXP (pat, 0, 0);
+- rtx src = SET_SRC (pat);
+-
+- if (IF_THEN_ELSE == GET_CODE (src))
+- return GET_CODE (XEXP (src, 0));
++ return GET_CODE (XEXP (SET_SRC (set), 0));
+ }
+
+ return UNKNOWN;
+ }
+
+
+-/* Returns true iff INSN is a tst insn that only tests the sign. */
++/* Returns true if INSN is a tst insn that only tests the sign. */
+
+ static bool
+ compare_sign_p (rtx_insn *insn)
+@@ -5709,23 +6046,95 @@ compare_sign_p (rtx_insn *insn)
+ }
+
+
+-/* Returns true iff the next insn is a JUMP_INSN with a condition
+- that needs to be swapped (GT, GTU, LE, LEU). */
++/* Returns true if INSN is a compare insn with the EQ or NE condition. */
+
+ static bool
+-compare_diff_p (rtx_insn *insn)
++compare_eq_p (rtx_insn *insn)
+ {
+ RTX_CODE cond = compare_condition (insn);
+- return (cond == GT || cond == GTU || cond == LE || cond == LEU) ? cond : 0;
++ return (cond == EQ || cond == NE);
+ }
+
+-/* Returns true iff INSN is a compare insn with the EQ or NE condition. */
+
+-static bool
+-compare_eq_p (rtx_insn *insn)
++/* Implement `TARGET_CANONICALIZE_COMPARISON'. */
++/* Basically tries to convert "difficult" comparisons like GT[U]
++ and LE[U] to simple ones. Some asymmetric comparisons can be
++ transformed to EQ or NE against zero. */
++
++static void
++avr_canonicalize_comparison (int *icode, rtx *op0, rtx *op1, bool op0_fixed)
+ {
+- RTX_CODE cond = compare_condition (insn);
+- return (cond == EQ || cond == NE);
++ enum rtx_code code = (enum rtx_code) *icode;
++ machine_mode mode = GET_MODE (*op0);
++
++ bool signed_p = code == GT || code == LE;
++ bool unsigned_p = code == GTU || code == LEU;
++ bool difficult_p = signed_p || unsigned_p;
++
++ if (// Only do integers and fixed-points.
++ (! SCALAR_INT_MODE_P (mode)
++ && ! ALL_SCALAR_FIXED_POINT_MODE_P (mode))
++ // Only do comparisons against a register.
++ || ! register_operand (*op0, mode))
++ return;
++
++ // Canonicalize "difficult" reg-reg comparisons.
++
++ if (! op0_fixed
++ && difficult_p
++ && register_operand (*op1, mode))
++ {
++ std::swap (*op0, *op1);
++ *icode = (int) swap_condition (code);
++ return;
++ }
++
++ // Canonicalize comparisons against compile-time constants.
++
++ if (CONST_INT_P (*op1)
++ || CONST_FIXED_P (*op1))
++ {
++ // INT_MODE of the same size.
++ scalar_int_mode imode = int_mode_for_mode (mode).require ();
++
++ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (imode);
++ unsigned HOST_WIDE_INT maxval = signed_p ? mask >> 1 : mask;
++
++ // Convert value *op1 to imode.
++ rtx xval = simplify_gen_subreg (imode, *op1, mode, 0);
++
++ // Canonicalize difficult comparisons against const.
++ if (difficult_p
++ && (UINTVAL (xval) & mask) != maxval)
++ {
++ // Convert *op0 > *op1 to *op0 >= 1 + *op1.
++ // Convert *op0 <= *op1 to *op0 < 1 + *op1.
++ xval = simplify_binary_operation (PLUS, imode, xval, const1_rtx);
++
++ // Convert value back to its original mode.
++ *op1 = simplify_gen_subreg (mode, xval, imode, 0);
++
++ // Map > to >= and <= to <.
++ *icode = (int) avr_normalize_condition (code);
++
++ return;
++ }
++
++ // Some asymmetric comparisons can be turned into EQ or NE.
++ if (code == LTU && xval == const1_rtx)
++ {
++ *icode = (int) EQ;
++ *op1 = CONST0_RTX (mode);
++ return;
++ }
++
++ if (code == GEU && xval == const1_rtx)
++ {
++ *icode = (int) NE;
++ *op1 = CONST0_RTX (mode);
++ return;
++ }
++ }
+ }
+
+
+@@ -6018,6 +6427,68 @@ avr_out_tstsi (rtx_insn *insn, rtx *op, int *plen)
+ }
+
+
++/* Output a comparison of a zero- or sign-extended register against a
++ plain register. CODE is SIGN_EXTEND or ZERO_EXTEND. Return "".
++
++ PLEN != 0: Set *PLEN to the code length in words. Don't output anything.
++ PLEN == 0: Print instructions. */
++
++const char*
++avr_out_cmp_ext (rtx xop[], enum rtx_code code, int *plen)
++{
++ // The smaller reg is the one that's to be extended. Get its index as z.
++ int z = GET_MODE_SIZE (GET_MODE (xop[1])) < GET_MODE_SIZE (GET_MODE (xop[0]));
++ rtx zreg = xop[z];
++ rtx reg = xop[1 - z];
++ machine_mode mode = GET_MODE (reg);
++ machine_mode zmode = GET_MODE (zreg);
++ rtx zex;
++
++ if (plen)
++ *plen = 0;
++
++ // zex holds the extended bytes above zreg. This is 0 for ZERO_EXTEND,
++ // and 0 or -1 for SIGN_EXTEND.
++
++ if (code == SIGN_EXTEND)
++ {
++ // Sign-extend the high-byte of zreg to tmp_reg.
++ int zmsb = GET_MODE_SIZE (zmode) - 1;
++ rtx xzmsb = simplify_gen_subreg (QImode, zreg, zmode, zmsb);
++
++ avr_asm_len ("mov __tmp_reg__,%0" CR_TAB
++ "rol __tmp_reg__" CR_TAB
++ "sbc __tmp_reg__,__tmp_reg__", &xzmsb, plen, 3);
++ zex = tmp_reg_rtx;
++ }
++ else if (code == ZERO_EXTEND)
++ {
++ zex = zero_reg_rtx;
++ }
++ else
++ gcc_unreachable();
++
++ // Now output n_bytes bytes of the very comparison.
++
++ int n_bytes = GET_MODE_SIZE (mode);
++
++ avr_asm_len ("cp %0,%1", xop, plen, 1);
++
++ for (int b = 1; b < n_bytes; ++b)
++ {
++ rtx regs[2];
++ regs[1 - z] = simplify_gen_subreg (QImode, reg, mode, b);
++ regs[z] = (b < GET_MODE_SIZE (zmode)
++ ? simplify_gen_subreg (QImode, zreg, zmode, b)
++ : zex);
++
++ avr_asm_len ("cpc %0,%1", regs, plen, 1);
++ }
++
++ return "";
++}
++
++
+ /* Generate asm equivalent for various shifts. This only handles cases
+ that are not already carefully hand-optimized in ?sh??i3_out.
+
+@@ -8160,6 +8631,122 @@ avr_out_plus (rtx insn, rtx *xop, int *plen, int *pcc, bool out_label)
+ }
+
+
++/* Output an instruction sequence for addition of REG in XOP[0] and CONST_INT
++ in XOP[1] in such a way that SREG.Z and SREG.N are set according to the
++ result. XOP[2] might be a d-regs clobber register. If XOP[2] is SCRATCH,
++ then the addition can be performed without a clobber reg. Return "".
++
++ If PLEN == NULL, then output the instructions.
++ If PLEN != NULL, then set *PLEN to the length of the sequence in words. */
++
++const char*
++avr_out_plus_set_ZN (rtx *xop, int *plen)
++{
++ if (plen)
++ *plen = 0;
++
++ // Register to compare and value to compare against.
++ rtx xreg = xop[0];
++ rtx xval = xop[1];
++
++ machine_mode mode = GET_MODE (xreg);
++
++ // Number of bytes to operate on.
++ int n_bytes = GET_MODE_SIZE (mode);
++
++ if (n_bytes == 1)
++ {
++ if (INTVAL (xval) == 1)
++ return avr_asm_len ("inc %0", xop, plen, 1);
++
++ if (INTVAL (xval) == -1)
++ return avr_asm_len ("dec %0", xop, plen, 1);
++ }
++
++ if (n_bytes == 2
++ && test_hard_reg_class (ADDW_REGS, xreg)
++ && IN_RANGE (INTVAL (xval), 1, 63))
++ {
++ // Add 16-bit value in [1..63] to a w register.
++ return avr_asm_len ("adiw %0, %1", xop, plen, 1);
++ }
++
++ // Addition won't work; subtract the negative of XVAL instead.
++ xval = simplify_unary_operation (NEG, mode, xval, mode);
++
++ // Value (0..0xff) held in clobber register xop[2] or -1 if unknown.
++ int clobber_val = -1;
++
++ // [0] = Current sub-register.
++ // [1] = Current partial xval.
++ // [2] = 8-bit clobber d-register or SCRATCH.
++ rtx op[3];
++ op[2] = xop[2];
++
++ // Work byte-wise from LSB to MSB. The lower two bytes might be
++ // SBIW'ed in one go.
++ for (int i = 0; i < n_bytes; ++i)
++ {
++ op[0] = simplify_gen_subreg (QImode, xreg, mode, i);
++
++ if (i == 0
++ && n_bytes >= 2
++ && test_hard_reg_class (ADDW_REGS, op[0]))
++ {
++ op[1] = simplify_gen_subreg (HImode, xval, mode, 0);
++ if (IN_RANGE (INTVAL (op[1]), 0, 63))
++ {
++ // SBIW can handle the lower 16 bits.
++ avr_asm_len ("sbiw %0, %1", op, plen, 1);
++
++ // Next byte has already been handled: Skip it.
++ ++i;
++ continue;
++ }
++ }
++
++ op[1] = simplify_gen_subreg (QImode, xval, mode, i);
++
++ if (test_hard_reg_class (LD_REGS, op[0]))
++ {
++ // d-regs can subtract immediates.
++ avr_asm_len (i == 0
++ ? "subi %0, %1"
++ : "sbci %0, %1", op, plen, 1);
++ }
++ else
++ {
++ int val8 = 0xff & INTVAL (op[1]);
++ if (val8 == 0)
++ {
++ // Any register can subtract 0.
++ avr_asm_len (i == 0
++ ? "sub %0, __zero_reg__"
++ : "sbc %0, __zero_reg__", op, plen, 1);
++ }
++ else
++ {
++ // Use d-register to hold partial xval.
++
++ if (val8 != clobber_val)
++ {
++ // Load partial xval to QI clobber reg and memoize for later.
++ gcc_assert (REG_P (op[2]));
++ avr_asm_len ("ldi %2, %1", op, plen, 1);
++ clobber_val = val8;
++ }
++
++ avr_asm_len (i == 0
++ ? "sub %0, %2"
++ : "sbc %0, %2", op, plen, 1);
++ }
++ }
++ } // Loop bytes.
++
++ return "";
++}
++
++
+ /* Output bit operation (IOR, AND, XOR) with register XOP[0] and compile
+ time constant XOP[2]:
+
+@@ -9291,6 +9878,8 @@ avr_adjust_insn_length (rtx_insn *insn, int len)
+ case ADJUST_LEN_TSTSI: avr_out_tstsi (insn, op, &len); break;
+ case ADJUST_LEN_COMPARE: avr_out_compare (insn, op, &len); break;
+ case ADJUST_LEN_COMPARE64: avr_out_compare64 (insn, op, &len); break;
++ case ADJUST_LEN_CMP_UEXT: avr_out_cmp_ext (op, ZERO_EXTEND, &len); break;
++ case ADJUST_LEN_CMP_SEXT: avr_out_cmp_ext (op, SIGN_EXTEND, &len); break;
+
+ case ADJUST_LEN_LSHRQI: lshrqi3_out (insn, op, &len); break;
+ case ADJUST_LEN_LSHRHI: lshrhi3_out (insn, op, &len); break;
+@@ -9311,6 +9900,7 @@ avr_adjust_insn_length (rtx_insn *insn, int len)
+ case ADJUST_LEN_CALL: len = AVR_HAVE_JMP_CALL ? 2 : 1; break;
+
+ case ADJUST_LEN_INSERT_BITS: avr_out_insert_bits (op, &len); break;
++ case ADJUST_LEN_ADD_SET_ZN: avr_out_plus_set_ZN (op, &len); break;
+
+ case ADJUST_LEN_INSV_NOTBIT:
+ avr_out_insert_notbit (insn, op, NULL_RTX, &len);
+@@ -10607,6 +11197,58 @@ avr_mul_highpart_cost (rtx x, int)
+ }
+
+
++/* Return the expected cost of a conditional branch like
++ (set (pc)
++ (if_then_else (X)
++ (label_ref *)
++ (pc)))
++ where X is some comparison operator. */
++
++static int
++avr_cbranch_cost (rtx x)
++{
++ bool difficult_p = difficult_comparison_operator (x, VOIDmode);
++
++ if (reload_completed)
++ {
++ // After reload, we basically just have plain branches.
++ return COSTS_N_INSNS (1 + difficult_p);
++ }
++
++ rtx xreg = XEXP (x, 0);
++ rtx xval = XEXP (x, 1);
++ machine_mode mode = GET_MODE (xreg);
++ if (mode == VOIDmode)
++ mode = GET_MODE (xval);
++ int size = GET_MODE_SIZE (mode);
++
++ if (GET_CODE (xreg) == ZERO_EXTEND
++ || GET_CODE (xval) == ZERO_EXTEND)
++ {
++ // *cbranch<HISI:mode>.<code><QIPSI:mode>.0/1, code = zero_extend.
++ return COSTS_N_INSNS (size + 1);
++ }
++
++ if (GET_CODE (xreg) == SIGN_EXTEND
++ || GET_CODE (xval) == SIGN_EXTEND)
++ {
++ // *cbranch<HISI:mode>.<code><QIPSI:mode>.0/1, code = sign_extend.
++ // Make it a bit cheaper than it actually is (less reg pressure).
++ return COSTS_N_INSNS (size + 1 + 1);
++ }
++
++ bool reg_p = register_operand (xreg, mode);
++ bool reg_or_0_p = reg_or_0_operand (xval, mode);
++
++ return COSTS_N_INSNS (size
++ // For the branch
++ + 1 + difficult_p
++ // Combine might propagate constants other than zero
++ // into the 2nd operand. Make that more expensive.
++ + 1 * (!reg_p || !reg_or_0_p));
++}
++
++
+ /* Mutually recursive subroutine of avr_rtx_cost for calculating the
+ cost of an RTX operand given its context. X is the rtx of the
+ operand, MODE is its mode, and OUTER is the rtx_code of this
+@@ -10844,6 +11486,15 @@ avr_rtx_costs_1 (rtx x, machine_mode mode, int outer_code,
+ *total += COSTS_N_INSNS (1);
+ return true;
+ }
++ if (IOR == code
++ && AND == GET_CODE (XEXP (x, 0))
++ && AND == GET_CODE (XEXP (x, 1))
++ && single_zero_operand (XEXP (XEXP (x, 0), 1), mode))
++ {
++ // Open-coded bit transfer.
++ *total = COSTS_N_INSNS (2);
++ return true;
++ }
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ if (!CONST_INT_P (XEXP (x, 1)))
+@@ -11490,6 +12141,15 @@ avr_rtx_costs_1 (rtx x, machine_mode mode, int outer_code,
+ }
+ break;
+
++ case IF_THEN_ELSE:
++ if (outer_code == SET
++ && XEXP (x, 2) == pc_rtx
++ && ordered_comparison_operator (XEXP (x, 0), VOIDmode))
++ {
++ *total = avr_cbranch_cost (XEXP (x, 0));
++ return true;
++ }
++
+ default:
+ break;
+ }
+@@ -11515,6 +12175,52 @@ avr_rtx_costs (rtx x, machine_mode mode, int outer_code,
+ }
+
+
++/* Implement `TARGET_INSN_COST'. */
++/* For some insns, it is not enough to look at the cost of the SET_SRC.
++ In that case, have a look at the entire insn, e.g. during insn combine. */
++
++static int
++avr_insn_cost (rtx_insn *insn, bool speed)
++{
++ const int unknown_cost = -1;
++ int cost = unknown_cost;
++
++ rtx set = single_set (insn);
++
++ if (set
++ && ZERO_EXTRACT == GET_CODE (SET_DEST (set)))
++ {
++ // Try to find anything that would flip the extracted bit.
++ bool not_bit_p = false;
++
++ subrtx_iterator::array_type array;
++ FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
++ {
++ enum rtx_code code = GET_CODE (*iter);
++ not_bit_p |= code == NOT || code == XOR || code == GE;
++ }
++
++ // Don't go too deep into the analysis. In almost all cases,
++ // using BLD/BST is the best we can do for single-bit moves,
++ // even considering CSE.
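++ // That is BST + BLD, plus one more instruction when the bit has to be
++ // inverted.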
++ cost = COSTS_N_INSNS (2 + not_bit_p);
++ }
++
++ if (cost != unknown_cost)
++ {
++ if (avr_log.rtx_costs)
++ avr_edump ("\n%? (%s) insn_cost=%d\n%r\n",
++ speed ? "speed" : "size", cost, insn);
++ return cost;
++ }
++
++ // Resort to what rtlanal.cc::insn_cost() implements as a default
++ // when targetm.insn_cost() is not implemented.
++
++ return pattern_cost (PATTERN (insn), speed);
++}
++
++
+ /* Implement `TARGET_ADDRESS_COST'. */
+
+ static int
+@@ -11602,281 +12308,6 @@ avr_normalize_condition (RTX_CODE condition)
+ }
+ }
+
+-/* Helper function for `avr_reorg'. */
+-
+-static rtx
+-avr_compare_pattern (rtx_insn *insn)
+-{
+- rtx pattern = single_set (insn);
+-
+- if (pattern
+- && NONJUMP_INSN_P (insn)
+- && REG_P (SET_DEST (pattern))
+- && REGNO (SET_DEST (pattern)) == REG_CC
+- && GET_CODE (SET_SRC (pattern)) == COMPARE)
+- {
+- machine_mode mode0 = GET_MODE (XEXP (SET_SRC (pattern), 0));
+- machine_mode mode1 = GET_MODE (XEXP (SET_SRC (pattern), 1));
+-
+- /* The 64-bit comparisons have fixed operands ACC_A and ACC_B.
+- They must not be swapped, thus skip them. */
+-
+- if ((mode0 == VOIDmode || GET_MODE_SIZE (mode0) <= 4)
+- && (mode1 == VOIDmode || GET_MODE_SIZE (mode1) <= 4))
+- return pattern;
+- }
+-
+- return NULL_RTX;
+-}
+-
+-/* Helper function for `avr_reorg'. */
+-
+-/* Expansion of switch/case decision trees leads to code like
+-
+- REG_CC = compare (Reg, Num)
+- if (REG_CC == 0)
+- goto L1
+-
+- REG_CC = compare (Reg, Num)
+- if (REG_CC > 0)
+- goto L2
+-
+- The second comparison is superfluous and can be deleted.
+- The second jump condition can be transformed from a
+- "difficult" one to a "simple" one because "REG_CC > 0" and
+- "REG_CC >= 0" will have the same effect here.
+-
+- This function relies on the way switch/case is being expaned
+- as binary decision tree. For example code see PR 49903.
+-
+- Return TRUE if optimization performed.
+- Return FALSE if nothing changed.
+-
+- INSN1 is a comparison, i.e. avr_compare_pattern != 0.
+-
+- We don't want to do this in text peephole because it is
+- tedious to work out jump offsets there and the second comparison
+- might have been transormed by `avr_reorg'.
+-
+- RTL peephole won't do because peephole2 does not scan across
+- basic blocks. */
+-
+-static bool
+-avr_reorg_remove_redundant_compare (rtx_insn *insn1)
+-{
+- rtx comp1, ifelse1, xcond1;
+- rtx_insn *branch1;
+- rtx comp2, ifelse2, xcond2;
+- rtx_insn *branch2, *insn2;
+- enum rtx_code code;
+- rtx_insn *jump;
+- rtx target, cond;
+-
+- /* Look out for: compare1 - branch1 - compare2 - branch2 */
+-
+- branch1 = next_nonnote_nondebug_insn (insn1);
+- if (!branch1 || !JUMP_P (branch1))
+- return false;
+-
+- insn2 = next_nonnote_nondebug_insn (branch1);
+- if (!insn2 || !avr_compare_pattern (insn2))
+- return false;
+-
+- branch2 = next_nonnote_nondebug_insn (insn2);
+- if (!branch2 || !JUMP_P (branch2))
+- return false;
+-
+- comp1 = avr_compare_pattern (insn1);
+- comp2 = avr_compare_pattern (insn2);
+- xcond1 = single_set (branch1);
+- xcond2 = single_set (branch2);
+-
+- if (!comp1 || !comp2
+- || !rtx_equal_p (comp1, comp2)
+- || !xcond1 || SET_DEST (xcond1) != pc_rtx
+- || !xcond2 || SET_DEST (xcond2) != pc_rtx
+- || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond1))
+- || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond2)))
+- {
+- return false;
+- }
+-
+- comp1 = SET_SRC (comp1);
+- ifelse1 = SET_SRC (xcond1);
+- ifelse2 = SET_SRC (xcond2);
+-
+- /* comp<n> is COMPARE now and ifelse<n> is IF_THEN_ELSE. */
+-
+- if (EQ != GET_CODE (XEXP (ifelse1, 0))
+- || !REG_P (XEXP (comp1, 0))
+- || !CONST_INT_P (XEXP (comp1, 1))
+- || XEXP (ifelse1, 2) != pc_rtx
+- || XEXP (ifelse2, 2) != pc_rtx
+- || LABEL_REF != GET_CODE (XEXP (ifelse1, 1))
+- || LABEL_REF != GET_CODE (XEXP (ifelse2, 1))
+- || !COMPARISON_P (XEXP (ifelse2, 0))
+- || REG_CC != REGNO (XEXP (XEXP (ifelse1, 0), 0))
+- || REG_CC != REGNO (XEXP (XEXP (ifelse2, 0), 0))
+- || const0_rtx != XEXP (XEXP (ifelse1, 0), 1)
+- || const0_rtx != XEXP (XEXP (ifelse2, 0), 1))
+- {
+- return false;
+- }
+-
+- /* We filtered the insn sequence to look like
+-
+- (set (reg:CC cc)
+- (compare (reg:M N)
+- (const_int VAL)))
+- (set (pc)
+- (if_then_else (eq (reg:CC cc)
+- (const_int 0))
+- (label_ref L1)
+- (pc)))
+-
+- (set (reg:CC cc)
+- (compare (reg:M N)
+- (const_int VAL)))
+- (set (pc)
+- (if_then_else (CODE (reg:CC cc)
+- (const_int 0))
+- (label_ref L2)
+- (pc)))
+- */
+-
+- code = GET_CODE (XEXP (ifelse2, 0));
+-
+- /* Map GT/GTU to GE/GEU which is easier for AVR.
+- The first two instructions compare/branch on EQ
+- so we may replace the difficult
+-
+- if (x == VAL) goto L1;
+- if (x > VAL) goto L2;
+-
+- with easy
+-
+- if (x == VAL) goto L1;
+- if (x >= VAL) goto L2;
+-
+- Similarly, replace LE/LEU by LT/LTU. */
+-
+- switch (code)
+- {
+- case EQ:
+- case LT: case LTU:
+- case GE: case GEU:
+- break;
+-
+- case LE: case LEU:
+- case GT: case GTU:
+- code = avr_normalize_condition (code);
+- break;
+-
+- default:
+- return false;
+- }
+-
+- /* Wrap the branches into UNSPECs so they won't be changed or
+- optimized in the remainder. */
+-
+- target = XEXP (XEXP (ifelse1, 1), 0);
+- cond = XEXP (ifelse1, 0);
+- jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn1);
+-
+- JUMP_LABEL (jump) = JUMP_LABEL (branch1);
+-
+- target = XEXP (XEXP (ifelse2, 1), 0);
+- cond = gen_rtx_fmt_ee (code, VOIDmode, cc_reg_rtx, const0_rtx);
+- jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn2);
+-
+- JUMP_LABEL (jump) = JUMP_LABEL (branch2);
+-
+- /* The comparisons in insn1 and insn2 are exactly the same;
+- insn2 is superfluous so delete it. */
+-
+- delete_insn (insn2);
+- delete_insn (branch1);
+- delete_insn (branch2);
+-
+- return true;
+-}
+-
+-
+-/* Implement `TARGET_MACHINE_DEPENDENT_REORG'. */
+-/* Optimize conditional jumps. */
+-
+-static void
+-avr_reorg (void)
+-{
+- rtx_insn *insn = get_insns();
+-
+- for (insn = next_real_insn (insn); insn; insn = next_real_insn (insn))
+- {
+- rtx pattern = avr_compare_pattern (insn);
+-
+- if (!pattern)
+- continue;
+-
+- if (optimize
+- && avr_reorg_remove_redundant_compare (insn))
+- {
+- continue;
+- }
+-
+- if (compare_diff_p (insn))
+- {
+- /* Now we work under compare insn with difficult branch. */
+-
+- rtx_insn *next = next_real_insn (insn);
+- rtx pat = PATTERN (next);
+- if (GET_CODE (pat) == PARALLEL)
+- pat = XVECEXP (pat, 0, 0);
+-
+- pattern = SET_SRC (pattern);
+-
+- if (true_regnum (XEXP (pattern, 0)) >= 0
+- && true_regnum (XEXP (pattern, 1)) >= 0)
+- {
+- rtx x = XEXP (pattern, 0);
+- rtx src = SET_SRC (pat);
+- rtx t = XEXP (src, 0);
+- PUT_CODE (t, swap_condition (GET_CODE (t)));
+- XEXP (pattern, 0) = XEXP (pattern, 1);
+- XEXP (pattern, 1) = x;
+- INSN_CODE (next) = -1;
+- }
+- else if (true_regnum (XEXP (pattern, 0)) >= 0
+- && XEXP (pattern, 1) == const0_rtx)
+- {
+- /* This is a tst insn, we can reverse it. */
+- rtx src = SET_SRC (pat);
+- rtx t = XEXP (src, 0);
+-
+- PUT_CODE (t, swap_condition (GET_CODE (t)));
+- XEXP (pattern, 1) = XEXP (pattern, 0);
+- XEXP (pattern, 0) = const0_rtx;
+- INSN_CODE (next) = -1;
+- INSN_CODE (insn) = -1;
+- }
+- else if (true_regnum (XEXP (pattern, 0)) >= 0
+- && CONST_INT_P (XEXP (pattern, 1)))
+- {
+- rtx x = XEXP (pattern, 1);
+- rtx src = SET_SRC (pat);
+- rtx t = XEXP (src, 0);
+- machine_mode mode = GET_MODE (XEXP (pattern, 0));
+-
+- if (avr_simplify_comparison_p (mode, GET_CODE (t), x))
+- {
+- XEXP (pattern, 1) = gen_int_mode (INTVAL (x) + 1, mode);
+- PUT_CODE (t, avr_normalize_condition (GET_CODE (t)));
+- INSN_CODE (next) = -1;
+- INSN_CODE (insn) = -1;
+- }
+- }
+- }
+- }
+-}
+
+ /* Returns register number for function return value.*/
+
+@@ -14572,6 +15003,8 @@ avr_float_lib_compare_returns_bool (machine_mode mode, enum rtx_code)
+ #undef TARGET_ASM_FINAL_POSTSCAN_INSN
+ #define TARGET_ASM_FINAL_POSTSCAN_INSN avr_asm_final_postscan_insn
+
++#undef TARGET_INSN_COST
++#define TARGET_INSN_COST avr_insn_cost
+ #undef TARGET_REGISTER_MOVE_COST
+ #define TARGET_REGISTER_MOVE_COST avr_register_move_cost
+ #undef TARGET_MEMORY_MOVE_COST
+@@ -14580,8 +15013,6 @@ avr_float_lib_compare_returns_bool (machine_mode mode, enum rtx_code)
+ #define TARGET_RTX_COSTS avr_rtx_costs
+ #undef TARGET_ADDRESS_COST
+ #define TARGET_ADDRESS_COST avr_address_cost
+-#undef TARGET_MACHINE_DEPENDENT_REORG
+-#define TARGET_MACHINE_DEPENDENT_REORG avr_reorg
+ #undef TARGET_FUNCTION_ARG
+ #define TARGET_FUNCTION_ARG avr_function_arg
+ #undef TARGET_FUNCTION_ARG_ADVANCE
+@@ -14711,6 +15142,12 @@ avr_float_lib_compare_returns_bool (machine_mode mode, enum rtx_code)
+ #undef TARGET_MD_ASM_ADJUST
+ #define TARGET_MD_ASM_ADJUST avr_md_asm_adjust
+
++#undef TARGET_CAN_INLINE_P
++#define TARGET_CAN_INLINE_P avr_can_inline_p
++
++#undef TARGET_CANONICALIZE_COMPARISON
++#define TARGET_CANONICALIZE_COMPARISON avr_canonicalize_comparison
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+
+ \f
+--- a/src/gcc/config/avr/avr.md
++++ b/src/gcc/config/avr/avr.md
+@@ -77,7 +77,6 @@ (define_c_enum "unspec"
+ UNSPEC_FMULS
+ UNSPEC_FMULSU
+ UNSPEC_COPYSIGN
+- UNSPEC_IDENTITY
+ UNSPEC_INSERT_BITS
+ UNSPEC_ROUND
+ ])
+@@ -165,6 +164,7 @@ (define_attr "adjust_len"
+ ashlsi, ashrsi, lshrsi,
+ ashlpsi, ashrpsi, lshrpsi,
+ insert_bits, insv_notbit, insv_notbit_0, insv_notbit_7,
++ add_set_ZN, cmp_uext, cmp_sext,
+ no"
+ (const_string "no"))
+
+@@ -251,11 +251,23 @@ (define_mode_iterator QIHI [QI HI])
+ (define_mode_iterator QIHI2 [QI HI])
+ (define_mode_iterator QISI [QI HI PSI SI])
+ (define_mode_iterator QIDI [QI HI PSI SI DI])
++(define_mode_iterator QIPSI [QI HI PSI])
+ (define_mode_iterator HISI [HI PSI SI])
+
++;; Ordered integral and fixed-point modes of specific sizes.
+ (define_mode_iterator ALL1 [QI QQ UQQ])
+ (define_mode_iterator ALL2 [HI HQ UHQ HA UHA])
+ (define_mode_iterator ALL4 [SI SQ USQ SA USA])
++(define_mode_iterator ALL234 [HI SI PSI
++ HQ UHQ HA UHA
++ SQ USQ SA USA])
++
++;; Ordered signed integral and signed fixed-point modes of specific sizes.
++(define_mode_iterator ALLs1 [QI QQ])
++(define_mode_iterator ALLs2 [HI HQ HA])
++(define_mode_iterator ALLs4 [SI SQ SA])
++(define_mode_iterator ALLs234 [HI SI PSI
++ HQ HA SQ SA])
+
+ ;; All supported move-modes
+ (define_mode_iterator MOVMODE [QI QQ UQQ
+@@ -273,15 +285,17 @@ (define_mode_iterator SPLIT34 [SI SF PSI
+ SQ USQ SA USA])
+
+ ;; Define code iterators
+-;; Define two incarnations so that we can build the cross product.
++;; Define two incarnations so that we can build the Cartesian product.
+ (define_code_iterator any_extend [sign_extend zero_extend])
+ (define_code_iterator any_extend2 [sign_extend zero_extend])
+ (define_code_iterator any_extract [sign_extract zero_extract])
+ (define_code_iterator any_shiftrt [lshiftrt ashiftrt])
+
++(define_code_iterator piaop [plus ior and])
+ (define_code_iterator bitop [xor ior and])
+ (define_code_iterator xior [xor ior])
+ (define_code_iterator eqne [eq ne])
++(define_code_iterator gelt [ge lt])
+
+ (define_code_iterator ss_addsub [ss_plus ss_minus])
+ (define_code_iterator us_addsub [us_plus us_minus])
+@@ -309,6 +323,10 @@ (define_code_attr abelian
+ [(ss_minus "") (us_minus "")
+ (ss_plus "%") (us_plus "%")])
+
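++;; Map a sign test (GE resp. LT against zero) to the equivalent test of the
++;; sign bit (EQ resp. NE).  Used by the sign-test peepholes below.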
++(define_code_attr gelt_eqne
++ [(ge "eq")
++ (lt "ne")])
++
+ ;; Map RTX code to its standard insn name
+ (define_code_attr code_stdname
+ [(ashift "ashl")
+@@ -1529,9 +1547,8 @@ (define_insn_and_split "*usum_widenqihi3_split"
+ "#"
+ "&& reload_completed"
+ [(parallel [(set (match_dup 0)
+- (plus:HI
+- (zero_extend:HI (match_dup 1))
+- (zero_extend:HI (match_dup 2))))
++ (plus:HI (zero_extend:HI (match_dup 1))
++ (zero_extend:HI (match_dup 2))))
+ (clobber (reg:CC REG_CC))])])
+
+
+@@ -2152,7 +2169,8 @@ (define_insn "*mulqi3_enh"
+ (define_expand "mulqi3_call"
+ [(set (reg:QI 24) (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 22) (match_operand:QI 2 "register_operand" ""))
+- (parallel [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
++ (parallel [(set (reg:QI 24)
++ (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))])
+ (set (match_operand:QI 0 "register_operand" "") (reg:QI 24))]
+ ""
+@@ -2166,12 +2184,14 @@ (define_insn_and_split "*mulqi3_call_split"
+ "!AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+- [(parallel [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
++ [(parallel [(set (reg:QI 24)
++ (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:CC REG_CC))])])
+
+ (define_insn "*mulqi3_call"
+- [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
++ [(set (reg:QI 24)
++ (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:CC REG_CC))]
+ "!AVR_HAVE_MUL && reload_completed"
+@@ -2307,7 +2327,7 @@ (define_insn "*addpsi3.lt0"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (plus:PSI (lshiftrt:PSI (match_operand:PSI 1 "register_operand" "r")
+ (const_int 23))
+- (match_operand:PSI 2 "register_operand" "0")))
++ (match_operand:PSI 2 "register_operand" "0")))
+ (clobber (reg:CC REG_CC))]
+ "reload_completed"
+ "mov __tmp_reg__,%C1\;lsl __tmp_reg__
+@@ -2433,7 +2453,7 @@ (define_insn "*sumulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))
+- (clobber (reg:CC REG_CC))]
++ (clobber (reg:CC REG_CC))]
+ "AVR_HAVE_MUL && reload_completed"
+ "mulsu %1,%2
+ movw %0,r0
+@@ -3088,7 +3108,7 @@ (define_insn_and_split "muluqihi3"
+ [(parallel [(set (match_dup 0)
+ (mult:HI (zero_extend:HI (match_dup 1))
+ (match_dup 2)))
+- (clobber (reg:CC REG_CC))])])
++ (clobber (reg:CC REG_CC))])])
+
+ (define_insn "*muluqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+@@ -3706,17 +3726,17 @@ (define_insn "*mulohisi3_call"
+ ;; CSE has problems to operate on hard regs.
+ ;;
+ (define_insn_and_split "divmodqi4"
+- [(set (match_operand:QI 0 "pseudo_register_operand" "")
+- (div:QI (match_operand:QI 1 "pseudo_register_operand" "")
+- (match_operand:QI 2 "pseudo_register_operand" "")))
+- (set (match_operand:QI 3 "pseudo_register_operand" "")
++ [(set (match_operand:QI 0 "pseudo_register_operand")
++ (div:QI (match_operand:QI 1 "pseudo_register_operand")
++ (match_operand:QI 2 "pseudo_register_operand")))
++ (set (match_operand:QI 3 "pseudo_register_operand")
+ (mod:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))
+ (clobber (reg:QI 24))
+ (clobber (reg:QI 25))]
+ ""
+- "this divmodqi4 pattern should have been splitted;"
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+@@ -3752,17 +3772,17 @@ (define_insn "*divmodqi4_call"
+ [(set_attr "type" "xcall")])
+
+ (define_insn_and_split "udivmodqi4"
+- [(set (match_operand:QI 0 "pseudo_register_operand" "")
+- (udiv:QI (match_operand:QI 1 "pseudo_register_operand" "")
+- (match_operand:QI 2 "pseudo_register_operand" "")))
+- (set (match_operand:QI 3 "pseudo_register_operand" "")
+- (umod:QI (match_dup 1) (match_dup 2)))
+- (clobber (reg:QI 22))
+- (clobber (reg:QI 23))
+- (clobber (reg:QI 24))
+- (clobber (reg:QI 25))]
+- ""
+- "this udivmodqi4 pattern should have been splitted;"
++ [(set (match_operand:QI 0 "pseudo_register_operand")
++ (udiv:QI (match_operand:QI 1 "pseudo_register_operand")
++ (match_operand:QI 2 "pseudo_register_operand")))
++ (set (match_operand:QI 3 "pseudo_register_operand")
++ (umod:QI (match_dup 1) (match_dup 2)))
++ (clobber (reg:QI 22))
++ (clobber (reg:QI 23))
++ (clobber (reg:QI 24))
++ (clobber (reg:QI 25))]
++ ""
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+@@ -3794,17 +3814,17 @@ (define_insn "*udivmodqi4_call"
+ [(set_attr "type" "xcall")])
+
+ (define_insn_and_split "divmodhi4"
+- [(set (match_operand:HI 0 "pseudo_register_operand" "")
+- (div:HI (match_operand:HI 1 "pseudo_register_operand" "")
+- (match_operand:HI 2 "pseudo_register_operand" "")))
+- (set (match_operand:HI 3 "pseudo_register_operand" "")
++ [(set (match_operand:HI 0 "pseudo_register_operand")
++ (div:HI (match_operand:HI 1 "pseudo_register_operand")
++ (match_operand:HI 2 "pseudo_register_operand")))
++ (set (match_operand:HI 3 "pseudo_register_operand")
+ (mod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))]
+ ""
+- "this should have been splitted;"
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+@@ -3840,17 +3860,17 @@ (define_insn "*divmodhi4_call"
+ [(set_attr "type" "xcall")])
+
+ (define_insn_and_split "udivmodhi4"
+- [(set (match_operand:HI 0 "pseudo_register_operand" "")
+- (udiv:HI (match_operand:HI 1 "pseudo_register_operand" "")
+- (match_operand:HI 2 "pseudo_register_operand" "")))
+- (set (match_operand:HI 3 "pseudo_register_operand" "")
++ [(set (match_operand:HI 0 "pseudo_register_operand")
++ (udiv:HI (match_operand:HI 1 "pseudo_register_operand")
++ (match_operand:HI 2 "pseudo_register_operand")))
++ (set (match_operand:HI 3 "pseudo_register_operand")
+ (umod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))]
+ ""
+- "this udivmodhi4 pattern should have been splitted.;"
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+@@ -3925,7 +3945,7 @@ (define_insn_and_split "*umulqihipsi3_split"
+ [(parallel [(set (match_dup 0)
+ (mult:PSI (zero_extend:PSI (match_dup 1))
+ (zero_extend:PSI (match_dup 2))))
+- (clobber (reg:CC REG_CC))])])
++ (clobber (reg:CC REG_CC))])])
+
+ (define_insn "*umulqihipsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=&r")
+@@ -4091,14 +4111,14 @@ (define_insn "*mulpsi3.libgcc"
+ ;; implementation works the other way round.
+
+ (define_insn_and_split "divmodpsi4"
+- [(parallel [(set (match_operand:PSI 0 "pseudo_register_operand" "")
+- (div:PSI (match_operand:PSI 1 "pseudo_register_operand" "")
+- (match_operand:PSI 2 "pseudo_register_operand" "")))
+- (set (match_operand:PSI 3 "pseudo_register_operand" "")
+- (mod:PSI (match_dup 1)
+- (match_dup 2)))
+- (clobber (reg:DI 18))
+- (clobber (reg:QI 26))])]
++ [(set (match_operand:PSI 0 "pseudo_register_operand")
++ (div:PSI (match_operand:PSI 1 "pseudo_register_operand")
++ (match_operand:PSI 2 "pseudo_register_operand")))
++ (set (match_operand:PSI 3 "pseudo_register_operand")
++ (mod:PSI (match_dup 1)
++ (match_dup 2)))
++ (clobber (reg:DI 18))
++ (clobber (reg:QI 26))]
+ ""
+ { gcc_unreachable(); }
+ ""
+@@ -4140,14 +4160,14 @@ (define_insn "*divmodpsi4_call"
+ [(set_attr "type" "xcall")])
+
+ (define_insn_and_split "udivmodpsi4"
+- [(parallel [(set (match_operand:PSI 0 "pseudo_register_operand" "")
+- (udiv:PSI (match_operand:PSI 1 "pseudo_register_operand" "")
+- (match_operand:PSI 2 "pseudo_register_operand" "")))
+- (set (match_operand:PSI 3 "pseudo_register_operand" "")
+- (umod:PSI (match_dup 1)
+- (match_dup 2)))
+- (clobber (reg:DI 18))
+- (clobber (reg:QI 26))])]
++ [(set (match_operand:PSI 0 "pseudo_register_operand")
++ (udiv:PSI (match_operand:PSI 1 "pseudo_register_operand")
++ (match_operand:PSI 2 "pseudo_register_operand")))
++ (set (match_operand:PSI 3 "pseudo_register_operand")
++ (umod:PSI (match_dup 1)
++ (match_dup 2)))
++ (clobber (reg:DI 18))
++ (clobber (reg:QI 26))]
+ ""
+ { gcc_unreachable(); }
+ ""
+@@ -4191,17 +4211,18 @@ (define_insn "*udivmodpsi4_call"
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ (define_insn_and_split "divmodsi4"
+- [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+- (div:SI (match_operand:SI 1 "pseudo_register_operand" "")
+- (match_operand:SI 2 "pseudo_register_operand" "")))
+- (set (match_operand:SI 3 "pseudo_register_operand" "")
+- (mod:SI (match_dup 1) (match_dup 2)))
+- (clobber (reg:SI 18))
+- (clobber (reg:SI 22))
+- (clobber (reg:HI 26))
+- (clobber (reg:HI 30))])]
++ [(set (match_operand:SI 0 "pseudo_register_operand")
++ (div:SI (match_operand:SI 1 "pseudo_register_operand")
++ (match_operand:SI 2 "pseudo_register_operand")))
++ (set (match_operand:SI 3 "pseudo_register_operand")
++ (mod:SI (match_dup 1)
++ (match_dup 2)))
++ (clobber (reg:SI 18))
++ (clobber (reg:SI 22))
++ (clobber (reg:HI 26))
++ (clobber (reg:HI 30))]
+ ""
+- "this divmodsi4 pattern should have been splitted;"
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+@@ -4237,17 +4258,18 @@ (define_insn "*divmodsi4_call"
+ [(set_attr "type" "xcall")])
+
+ (define_insn_and_split "udivmodsi4"
+- [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+- (udiv:SI (match_operand:SI 1 "pseudo_register_operand" "")
+- (match_operand:SI 2 "pseudo_register_operand" "")))
+- (set (match_operand:SI 3 "pseudo_register_operand" "")
+- (umod:SI (match_dup 1) (match_dup 2)))
+- (clobber (reg:SI 18))
+- (clobber (reg:SI 22))
+- (clobber (reg:HI 26))
+- (clobber (reg:HI 30))])]
++ [(set (match_operand:SI 0 "pseudo_register_operand")
++ (udiv:SI (match_operand:SI 1 "pseudo_register_operand")
++ (match_operand:SI 2 "pseudo_register_operand")))
++ (set (match_operand:SI 3 "pseudo_register_operand")
++ (umod:SI (match_dup 1)
++ (match_dup 2)))
++ (clobber (reg:SI 18))
++ (clobber (reg:SI 22))
++ (clobber (reg:HI 26))
++ (clobber (reg:HI 30))]
+ ""
+- "this udivmodsi4 pattern should have been splitted;"
++ { gcc_unreachable(); }
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+@@ -4712,7 +4734,8 @@ (define_split
+ [(parallel [(set (match_operand:HISI 0 "register_operand")
+ (bitop:HISI (match_dup 0)
+ (match_operand:HISI 1 "register_operand")))
+- (clobber (scratch:QI))])]
++ (clobber (scratch:QI))
++ (clobber (reg:CC REG_CC))])]
+ "optimize
+ && reload_completed"
+ [(const_int 1)]
+@@ -4726,6 +4749,43 @@ (define_split
+ DONE;
+ })
+
++;; If $0 = $0 <op> const requires a QI scratch, and d-reg $1 dies after
++;; the first insn, then we can replace
++;; $0 = $1
++;; $0 = $0 <op> const
++;; by
++;; $1 = $1 <op> const
++;; $0 = $1
++;; This transforms constraint alternative "r,0,n,&d" of the first operation
++;; to alternative "d,0,n,X".
++;; "*addhi3_clobber" "*addpsi3" "*addsi3"
++;; "*addhq3" "*adduhq3" "*addha3" "*adduha3"
++;; "*addsq3" "*addusq3" "*addsa3" "*addusa3"
++;; "*iorhi3" "*iorpsi3" "*iorsi3"
++;; "*andhi3" "*andpsi3" "*andsi3"
++(define_peephole2
++ [(parallel [(set (match_operand:ORDERED234 0 "register_operand")
++ (match_operand:ORDERED234 1 "d_register_operand"))
++ (clobber (reg:CC REG_CC))])
++ (parallel [(set (match_dup 0)
++ (piaop:ORDERED234 (match_dup 0)
++ (match_operand:ORDERED234 2 "const_operand")))
++ ; A d-reg as scratch tells that this insn is expensive, and
++ ; that $0 is not a d-register: l-reg or something like SI:14 etc.
++ (clobber (match_operand:QI 3 "d_register_operand"))
++ (clobber (reg:CC REG_CC))])]
++ "peep2_reg_dead_p (1, operands[1])"
++ [(parallel [(set (match_dup 1)
++ (piaop:ORDERED234 (match_dup 1)
++ (match_dup 2)))
++ (clobber (scratch:QI))
++ (clobber (reg:CC REG_CC))])
++ ; Unfortunately, the following insn lacks a REG_DEAD note for $1,
++ ; so this peep2 works only once.
++ (parallel [(set (match_dup 0)
++ (match_dup 1))
++ (clobber (reg:CC REG_CC))])])
++
+
+ ;; swap swap swap swap swap swap swap swap swap swap swap swap swap swap swap
+ ;; swap
+@@ -5684,7 +5744,7 @@ (define_insn "*lshr<mode>3"
+ ;; "lshrha3" "lshruha3"
+ (define_insn_and_split "lshr<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r,r,r")
+- (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
++ (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "#"
+@@ -5696,7 +5756,7 @@ (define_insn_and_split "lshr<mode>3"
+
+ (define_insn "*lshr<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r,r,r")
+- (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
++ (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))
+ (clobber (reg:CC REG_CC))]
+ "reload_completed"
+@@ -6449,80 +6509,41 @@ (define_insn_and_split "zero_extendsidi2"
+ ;;<=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=>
+ ;; compare
+
+-; Optimize negated tests into reverse compare if overflow is undefined.
+-(define_insn "*negated_tstqi"
++;; "*swapped_tstqi" "*swapped_tstqq"
++(define_insn "*swapped_tst<mode>"
+ [(set (reg:CC REG_CC)
+- (compare:CC (neg:QI (match_operand:QI 0 "register_operand" "r"))
+- (const_int 0)))]
+- "reload_completed && !flag_wrapv && !flag_trapv"
+- "cp __zero_reg__,%0"
+- [(set_attr "length" "1")])
+-
+-(define_insn "*reversed_tstqi"
+- [(set (reg:CC REG_CC)
+- (compare:CC (const_int 0)
+- (match_operand:QI 0 "register_operand" "r")))]
++ (compare:CC (match_operand:ALLs1 0 "const0_operand" "Y00")
++ (match_operand:ALLs1 1 "register_operand" "r")))]
+ "reload_completed"
+- "cp __zero_reg__,%0"
+-[(set_attr "length" "2")])
++ "cp __zero_reg__,%1"
++[(set_attr "length" "1")])
+
+-(define_insn "*negated_tsthi"
+- [(set (reg:CC REG_CC)
+- (compare:CC (neg:HI (match_operand:HI 0 "register_operand" "r"))
+- (const_int 0)))]
+- "reload_completed && !flag_wrapv && !flag_trapv"
+- "cp __zero_reg__,%A0
+- cpc __zero_reg__,%B0"
+-[(set_attr "length" "2")])
+-
+-;; Leave here the clobber used by the cmphi pattern for simplicity, even
+-;; though it is unused, because this pattern is synthesized by avr_reorg.
+-(define_insn "*reversed_tsthi"
++
++;; "*swapped_tsthi" "*swapped_tsthq" "*swapped_tstha"
++(define_insn "*swapped_tst<mode>"
+ [(set (reg:CC REG_CC)
+- (compare:CC (const_int 0)
+- (match_operand:HI 0 "register_operand" "r")))
+- (clobber (match_scratch:QI 1 "=X"))]
++ (compare:CC (match_operand:ALLs2 0 "const0_operand" "Y00")
++ (match_operand:ALLs2 1 "register_operand" "r")))]
+ "reload_completed"
+- "cp __zero_reg__,%A0
+- cpc __zero_reg__,%B0"
+-[(set_attr "length" "2")])
++ "cp __zero_reg__,%A1
++ cpc __zero_reg__,%B1"
++ [(set_attr "length" "2")])
+
+-(define_insn "*negated_tstpsi"
+- [(set (reg:CC REG_CC)
+- (compare:CC (neg:PSI (match_operand:PSI 0 "register_operand" "r"))
+- (const_int 0)))]
+- "reload_completed && !flag_wrapv && !flag_trapv"
+- "cp __zero_reg__,%A0\;cpc __zero_reg__,%B0\;cpc __zero_reg__,%C0"
+- [(set_attr "length" "3")])
+
+-(define_insn "*reversed_tstpsi"
++(define_insn "*swapped_tstpsi"
+ [(set (reg:CC REG_CC)
+ (compare:CC (const_int 0)
+- (match_operand:PSI 0 "register_operand" "r")))
+- (clobber (match_scratch:QI 1 "=X"))]
++ (match_operand:PSI 0 "register_operand" "r")))]
+ "reload_completed"
+ "cp __zero_reg__,%A0\;cpc __zero_reg__,%B0\;cpc __zero_reg__,%C0"
+ [(set_attr "length" "3")])
+
+-(define_insn "*negated_tstsi"
+- [(set (reg:CC REG_CC)
+- (compare:CC (neg:SI (match_operand:SI 0 "register_operand" "r"))
+- (const_int 0)))]
+- "reload_completed && !flag_wrapv && !flag_trapv"
+- "cp __zero_reg__,%A0
+- cpc __zero_reg__,%B0
+- cpc __zero_reg__,%C0
+- cpc __zero_reg__,%D0"
+- [(set_attr "length" "4")])
+
+-;; "*reversed_tstsi"
+-;; "*reversed_tstsq" "*reversed_tstusq"
+-;; "*reversed_tstsa" "*reversed_tstusa"
+-(define_insn "*reversed_tst<mode>"
++;; "*swapped_tstsi" "*swapped_tstsq" "*swapped_tstsa"
++(define_insn "*swapped_tst<mode>"
+ [(set (reg:CC REG_CC)
+- (compare:CC (match_operand:ALL4 0 "const0_operand" "Y00")
+- (match_operand:ALL4 1 "register_operand" "r")))
+- (clobber (match_scratch:QI 2 "=X"))]
++ (compare:CC (match_operand:ALLs4 0 "const0_operand" "Y00")
++ (match_operand:ALLs4 1 "register_operand" "r")))]
+ "reload_completed"
+ "cp __zero_reg__,%A1
+ cpc __zero_reg__,%B1
+@@ -6536,38 +6557,40 @@ (define_insn "*reversed_tst<mode>"
+ (define_insn "cmp<mode>3"
+ [(set (reg:CC REG_CC)
+ (compare:CC (match_operand:ALL1 0 "register_operand" "r ,r,d")
+- (match_operand:ALL1 1 "nonmemory_operand" "Y00,r,i")))]
++ (match_operand:ALL1 1 "nonmemory_operand" "Y00,r,i")))]
+ "reload_completed"
+ "@
+- tst %0
++ cp %0, __zero_reg__
+ cp %0,%1
+ cpi %0,lo8(%1)"
+ [(set_attr "length" "1,1,1")])
+
+-(define_insn "*cmpqi_sign_extend"
+- [(set (reg:CC REG_CC)
+- (compare:CC (sign_extend:HI (match_operand:QI 0 "register_operand" "d"))
+- (match_operand:HI 1 "s8_operand" "n")))]
+- "reload_completed"
+- "cpi %0,lo8(%1)"
+- [(set_attr "length" "1")])
+
+-
+-(define_insn "*cmphi.zero-extend.0"
++;; May be generated by "*cbranch<HISI:mode>.<code><QIPSI:mode>.0/1".
++(define_insn "*cmp<HISI:mode>.<code><QIPSI:mode>.0"
+ [(set (reg:CC REG_CC)
+- (compare:CC (zero_extend:HI (match_operand:QI 0 "register_operand" "r"))
+- (match_operand:HI 1 "register_operand" "r")))]
+- "reload_completed"
+- "cp %0,%A1\;cpc __zero_reg__,%B1"
+- [(set_attr "length" "2")])
++ (compare:CC (any_extend:HISI (match_operand:QIPSI 0 "register_operand" "r"))
++ (match_operand:HISI 1 "register_operand" "r")))]
++ "reload_completed
++ && GET_MODE_SIZE (<HISI:MODE>mode) > GET_MODE_SIZE (<QIPSI:MODE>mode)"
++ {
++ return avr_out_cmp_ext (operands, <CODE>, nullptr);
++ }
++ [(set_attr "adjust_len" "cmp_<extend_su>ext")])
+
+-(define_insn "*cmphi.zero-extend.1"
++;; Swapped version of the above.
++;; May be generated by "*cbranch<HISI:mode>.<code><QIPSI:mode>.0/1".
++(define_insn "*cmp<HISI:mode>.<code><QIPSI:mode>.1"
+ [(set (reg:CC REG_CC)
+- (compare:CC (match_operand:HI 0 "register_operand" "r")
+- (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))))]
+- "reload_completed"
+- "cp %A0,%1\;cpc %B0,__zero_reg__"
+- [(set_attr "length" "2")])
++ (compare:CC (match_operand:HISI 0 "register_operand" "r")
++ (any_extend:HISI (match_operand:QIPSI 1 "register_operand" "r"))))]
++ "reload_completed
++ && GET_MODE_SIZE (<HISI:MODE>mode) > GET_MODE_SIZE (<QIPSI:MODE>mode)"
++ {
++ return avr_out_cmp_ext (operands, <CODE>, nullptr);
++ }
++ [(set_attr "adjust_len" "cmp_<extend_su>ext")])
++
+
+ ;; "cmphi3"
+ ;; "cmphq3" "cmpuhq3"
+@@ -6575,8 +6598,8 @@ (define_insn "*cmphi.zero-extend.1"
+ (define_insn "cmp<mode>3"
+ [(set (reg:CC REG_CC)
+ (compare:CC (match_operand:ALL2 0 "register_operand" "!w ,r ,r,d ,r ,d,r")
+- (match_operand:ALL2 1 "nonmemory_operand" "Y00,Y00,r,s ,s ,M,n Ynn")))
+- (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d ,X,&d"))]
++ (match_operand:ALL2 1 "nonmemory_operand" "Y00,Y00,r,s ,s ,M,n Ynn")))
++ (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d ,X,&d"))]
+ "reload_completed"
+ {
+ switch (which_alternative)
+@@ -6603,14 +6626,14 @@ (define_insn "cmp<mode>3"
+
+ return avr_out_compare (insn, operands, NULL);
+ }
+- [(set_attr "length" "1,2,2,3,4,2,4")
++ [(set_attr "length" "2,2,2,3,4,2,4")
+ (set_attr "adjust_len" "tsthi,tsthi,*,*,*,compare,compare")])
+
+ (define_insn "*cmppsi"
+ [(set (reg:CC REG_CC)
+ (compare:CC (match_operand:PSI 0 "register_operand" "r,r,d ,r ,d,r")
+- (match_operand:PSI 1 "nonmemory_operand" "L,r,s ,s ,M,n")))
+- (clobber (match_scratch:QI 2 "=X,X,&d,&d ,X,&d"))]
++ (match_operand:PSI 1 "nonmemory_operand" "L,r,s ,s ,M,n")))
++ (clobber (match_scratch:QI 2 "=X,X,&d,&d ,X,&d"))]
+ "reload_completed"
+ {
+ switch (which_alternative)
+@@ -6641,8 +6664,8 @@ (define_insn "*cmppsi"
+ (define_insn "*cmp<mode>"
+ [(set (reg:CC REG_CC)
+ (compare:CC (match_operand:ALL4 0 "register_operand" "r ,r ,d,r ,r")
+- (match_operand:ALL4 1 "nonmemory_operand" "Y00,r ,M,M ,n Ynn")))
+- (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d"))]
++ (match_operand:ALL4 1 "nonmemory_operand" "Y00,r ,M,M ,n Ynn")))
++ (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d"))]
+ "reload_completed"
+ {
+ if (0 == which_alternative)
+@@ -6656,6 +6679,13 @@ (define_insn "*cmp<mode>"
+ (set_attr "adjust_len" "tstsi,*,compare,compare,compare")])
+
+
++;; A helper for avr_pass_ifelse::avr_rest_of_handle_ifelse().
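++;; It wraps the compare against a constant and its QImode scratch clobber
++;; into one expander, so the pass can emit both with a single generator call.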
++(define_expand "gen_compare<mode>"
++ [(parallel [(set (reg:CC REG_CC)
++ (compare:CC (match_operand:HISI 0 "register_operand")
++ (match_operand:HISI 1 "const_int_operand")))
++ (clobber (match_operand:QI 2 "scratch_operand"))])])
++
+ ;; ----------------------------------------------------------------------
+ ;; JUMP INSTRUCTIONS
+ ;; ----------------------------------------------------------------------
+@@ -6664,53 +6694,67 @@ (define_insn "*cmp<mode>"
+ (define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ALL1 1 "register_operand" "")
+- (match_operand:ALL1 2 "nonmemory_operand" "")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))])
++ [(match_operand:ALL1 1 "register_operand")
++ (match_operand:ALL1 2 "nonmemory_operand")])
++ (label_ref (match_operand 3))
++ (pc)))]
++ ""
++ {
++ int icode = (int) GET_CODE (operands[0]);
++
++ targetm.canonicalize_comparison (&icode, &operands[1], &operands[2], false);
++ PUT_CODE (operands[0], (enum rtx_code) icode);
++ })
+
+ (define_expand "cbranch<mode>4"
+ [(parallel
+ [(set (pc)
+- (if_then_else
+- (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ORDERED234 1 "register_operand" "")
+- (match_operand:ORDERED234 2 "nonmemory_operand" "")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))
+- (clobber (match_scratch:QI 4 ""))])])
+-
+-;; "*cbranchqi4"
+-;; "*cbranchqq4" "*cbranchuqq4"
+-(define_insn_and_split "*cbranch<mode>4"
++ (if_then_else (match_operator 0 "ordered_comparison_operator"
++ [(match_operand:ALL234 1 "register_operand")
++ (match_operand:ALL234 2 "nonmemory_operand")])
++ (label_ref (match_operand 3))
++ (pc)))
++ (clobber (match_scratch:QI 4))])]
++ ""
++ {
++ int icode = (int) GET_CODE (operands[0]);
++
++ targetm.canonicalize_comparison (&icode, &operands[1], &operands[2], false);
++ PUT_CODE (operands[0], (enum rtx_code) icode);
++ })
++
++
++;; "cbranchqi4_insn"
++;; "cbranchqq4_insn" "cbranchuqq4_insn"
++(define_insn_and_split "cbranch<mode>4_insn"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ALL1 1 "register_operand" "r ,r,d")
++ [(match_operand:ALL1 1 "register_operand" "r ,r,d")
+ (match_operand:ALL1 2 "nonmemory_operand" "Y00,r,i")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))]
++ (label_ref (match_operand 3))
++ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (reg:CC REG_CC)
+- (compare:CC (match_dup 1) (match_dup 2)))
++ (compare:CC (match_dup 1) (match_dup 2)))
+ (set (pc)
+ (if_then_else (match_op_dup 0
+ [(reg:CC REG_CC) (const_int 0)])
+ (label_ref (match_dup 3))
+- (pc)))]
+- "")
++ (pc)))])
+
+-;; "*cbranchsi4" "*cbranchsq4" "*cbranchusq4" "*cbranchsa4" "*cbranchusa4"
+-(define_insn_and_split "*cbranch<mode>4"
++;; "cbranchsi4_insn"
++;; "cbranchsq4_insn" "cbranchusq4_insn" "cbranchsa4_insn" "cbranchusa4_insn"
++(define_insn_and_split "cbranch<mode>4_insn"
+ [(set (pc)
+- (if_then_else
+- (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ALL4 1 "register_operand" "r ,r ,d,r ,r")
+- (match_operand:ALL4 2 "nonmemory_operand" "Y00,r ,M,M ,n Ynn")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))
+- (clobber (match_scratch:QI 4 "=X ,X ,X,&d,&d"))]
++ (if_then_else
++ (match_operator 0 "ordered_comparison_operator"
++ [(match_operand:ALL4 1 "register_operand" "r ,r,d,r ,r")
++ (match_operand:ALL4 2 "nonmemory_operand" "Y00,r,M,M ,n Ynn")])
++ (label_ref (match_operand 3))
++ (pc)))
++ (clobber (match_scratch:QI 4 "=X ,X,X,&d,&d"))]
+ ""
+ "#"
+ "reload_completed"
+@@ -6721,19 +6765,18 @@ (define_insn_and_split "*cbranch<mode>4"
+ (if_then_else (match_op_dup 0
+ [(reg:CC REG_CC) (const_int 0)])
+ (label_ref (match_dup 3))
+- (pc)))]
+- "")
++ (pc)))])
+
+-;; "*cbranchpsi4"
+-(define_insn_and_split "*cbranchpsi4"
++;; "cbranchpsi4_insn"
++(define_insn_and_split "cbranchpsi4_insn"
+ [(set (pc)
+- (if_then_else
+- (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:PSI 1 "register_operand" "r,r,d ,r ,d,r")
+- (match_operand:PSI 2 "nonmemory_operand" "L,r,s ,s ,M,n")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))
+- (clobber (match_scratch:QI 4 "=X,X,&d,&d ,X,&d"))]
++ (if_then_else
++ (match_operator 0 "ordered_comparison_operator"
++ [(match_operand:PSI 1 "register_operand" "r,r,d ,r ,d,r")
++ (match_operand:PSI 2 "nonmemory_operand" "L,r,s ,s ,M,n")])
++ (label_ref (match_operand 3))
++ (pc)))
++ (clobber (match_scratch:QI 4 "=X,X,&d,&d,X,&d"))]
+ ""
+ "#"
+ "reload_completed"
+@@ -6744,19 +6787,19 @@ (define_insn_and_split "*cbranchpsi4"
+ (if_then_else (match_op_dup 0
+ [(reg:CC REG_CC) (const_int 0)])
+ (label_ref (match_dup 3))
+- (pc)))]
+- "")
++ (pc)))])
+
+-;; "*cbranchhi4" "*cbranchhq4" "*cbranchuhq4" "*cbranchha4" "*cbranchuha4"
+-(define_insn_and_split "*cbranch<mode>4"
++;; "cbranchhi4_insn"
++;; "cbranchhq4_insn" "cbranchuhq4_insn" "cbranchha4_insn" "cbranchuha4_insn"
++(define_insn_and_split "cbranch<mode>4_insn"
+ [(set (pc)
+- (if_then_else
+- (match_operator 0 "ordered_comparison_operator"
+- [(match_operand:ALL2 1 "register_operand" "!w ,r ,r,d ,r ,d,r")
+- (match_operand:ALL2 2 "nonmemory_operand" "Y00,Y00,r,s ,s ,M,n Ynn")])
+- (label_ref (match_operand 3 "" ""))
+- (pc)))
+- (clobber (match_scratch:QI 4 "=X ,X ,X,&d,&d ,X,&d"))]
++ (if_then_else
++ (match_operator 0 "ordered_comparison_operator"
++ [(match_operand:ALL2 1 "register_operand" "!w ,r ,r,d ,r ,d,r")
++ (match_operand:ALL2 2 "nonmemory_operand" "Y00,Y00,r,s ,s ,M,n Ynn")])
++ (label_ref (match_operand 3))
++ (pc)))
++ (clobber (match_scratch:QI 4 "=X ,X ,X,&d,&d,X,&d"))]
+ ""
+ "#"
+ "reload_completed"
+@@ -6767,8 +6810,71 @@ (define_insn_and_split "*cbranch<mode>4"
+ (if_then_else (match_op_dup 0
+ [(reg:CC REG_CC) (const_int 0)])
+ (label_ref (match_dup 3))
+- (pc)))]
+- "")
++ (pc)))])
++
++;; Combiner pattern to compare a sign- or zero-extended register against
++;; a wider register, like comparing uint8_t against uint16_t.
++(define_insn_and_split "*cbranch<HISI:mode>.<code><QIPSI:mode>.0"
++ [(set (pc)
++ (if_then_else (match_operator 0 "ordered_comparison_operator"
++ [(any_extend:HISI (match_operand:QIPSI 1 "register_operand" "r"))
++ (match_operand:HISI 2 "register_operand" "r")])
++ (label_ref (match_operand 3))
++ (pc)))]
++ "optimize
++ && GET_MODE_SIZE (<HISI:MODE>mode) > GET_MODE_SIZE (<QIPSI:MODE>mode)"
++ "#"
++ "&& reload_completed"
++ [; "*cmp<HISI:mode>.<code><QIPSI:mode>.0"
++ (set (reg:CC REG_CC)
++ (compare:CC (match_dup 1)
++ (match_dup 2)))
++ ; "branch"
++ (set (pc)
++ (if_then_else (match_op_dup 0 [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_dup 3))
++ (pc)))]
++ {
++ operands[1] = gen_rtx_<CODE> (<HISI:MODE>mode, operands[1]);
++ if (difficult_comparison_operator (operands[0], VOIDmode))
++ {
++ PUT_CODE (operands[0], swap_condition (GET_CODE (operands[0])));
++ std::swap (operands[1], operands[2]);
++ }
++ })
++
++;; Same combiner pattern, but with swapped operands.
++(define_insn_and_split "*cbranch<HISI:mode>.<code><QIPSI:mode>.0"
++ [(set (pc)
++ (if_then_else (match_operator 0 "ordered_comparison_operator"
++ [(match_operand:HISI 1 "register_operand" "r")
++ (any_extend:HISI (match_operand:QIPSI 2 "register_operand" "r"))])
++ (label_ref (match_operand 3))
++ (pc)))]
++ "optimize
++ && GET_MODE_SIZE (<HISI:MODE>mode) > GET_MODE_SIZE (<QIPSI:MODE>mode)"
++ "#"
++ "&& reload_completed"
++ [; "*cmp<HISI:mode>.<code><QIPSI:mode>.0"
++ (set (reg:CC REG_CC)
++ (compare:CC (match_dup 1)
++ (match_dup 2)))
++ ; "branch"
++ (set (pc)
++ (if_then_else (match_op_dup 0 [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_dup 3))
++ (pc)))]
++ {
++ operands[2] = gen_rtx_<CODE> (<HISI:MODE>mode, operands[2]);
++ if (difficult_comparison_operator (operands[0], VOIDmode))
++ {
++ PUT_CODE (operands[0], swap_condition (GET_CODE (operands[0])));
++ std::swap (operands[1], operands[2]);
++ }
++ })
++
+
+ ;; Test a single bit in a QI/HI/SImode register.
+ ;; Combine will create zero extract patterns for single bit tests.
+@@ -6842,14 +6948,11 @@ (define_insn_and_split "*sbrx_and_branch<mode>_split"
+ "#"
+ "&& reload_completed"
+ [(parallel [(set (pc)
+- (if_then_else
+- (match_op_dup 0
+- [(and:QISI
+- (match_dup 1)
+- (match_dup 2))
+- (const_int 0)])
+- (label_ref (match_dup 3))
+- (pc)))
++ (if_then_else (match_op_dup 0 [(and:QISI (match_dup 1)
++ (match_dup 2))
++ (const_int 0)])
++ (label_ref (match_dup 3))
++ (pc)))
+ (clobber (reg:CC REG_CC))])])
+
+ (define_insn "*sbrx_and_branch<mode>"
+@@ -6878,163 +6981,77 @@ (define_insn "*sbrx_and_branch<mode>"
+ (const_int 2)
+ (const_int 4))))])
+
+-;; Convert sign tests to bit 7/15/31 tests that match the above insns.
+-(define_peephole2
+- [(set (reg:CC REG_CC) (compare:CC (match_operand:QI 0 "register_operand" "")
+- (const_int 0)))
+- (parallel [(set (pc) (if_then_else (ge (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (eq (zero_extract:HI (match_dup 0)
+- (const_int 1)
+- (const_int 7))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
+
+-(define_peephole2
+- [(set (reg:CC REG_CC) (compare:CC (match_operand:QI 0 "register_operand" "")
+- (const_int 0)))
+- (parallel [(set (pc) (if_then_else (lt (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (ne (zero_extract:HI (match_dup 0)
+- (const_int 1)
+- (const_int 7))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_peephole2
+- [(parallel [(set (reg:CC REG_CC) (compare:CC (match_operand:HI 0 "register_operand" "")
+- (const_int 0)))
+- (clobber (match_operand:HI 2 ""))])
+- (parallel [(set (pc) (if_then_else (ge (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (eq (and:HI (match_dup 0) (const_int -32768))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_peephole2
+- [(parallel [(set (reg:CC REG_CC) (compare:CC (match_operand:HI 0 "register_operand" "")
+- (const_int 0)))
+- (clobber (match_operand:HI 2 ""))])
+- (parallel [(set (pc) (if_then_else (lt (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
++;; Convert sign tests to bit 7 tests that match the above insns.
++(define_peephole2 ; "*sbrx_branch<mode>"
++ [(set (reg:CC REG_CC)
++ (compare:CC (match_operand:ALLs1 0 "register_operand")
++ (match_operand:ALLs1 1 "const0_operand")))
++ (set (pc)
++ (if_then_else (gelt (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "peep2_regno_dead_p (2, REG_CC)"
++ [(parallel [(set (pc)
++ (if_then_else (<gelt_eqne> (zero_extract:HI (match_dup 0)
++ (const_int 1)
++ (match_dup 1))
++ (const_int 0))
++ (label_ref (match_dup 2))
++ (pc)))
+ (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (ne (and:HI (match_dup 0) (const_int -32768))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
++ {
++ operands[0] = avr_to_int_mode (operands[0]);
++ operands[1] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 1);
++ })
+
+-(define_peephole2
+- [(parallel [(set (reg:CC REG_CC) (compare:CC (match_operand:SI 0 "register_operand" "")
+- (const_int 0)))
+- (clobber (match_operand:SI 2 ""))])
+- (parallel [(set (pc) (if_then_else (ge (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (eq (and:SI (match_dup 0) (match_dup 2))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
++;; Convert sign tests to bit 15/23/31 tests that match the above insns.
++(define_peephole2 ; "*sbrx_branch<mode>"
++ [(parallel [(set (reg:CC REG_CC)
++ (compare:CC (match_operand:ALLs234 0 "register_operand")
++ (match_operand:ALLs234 1 "const0_operand")))
++ (clobber (match_operand:QI 3 "scratch_operand"))])
++ (set (pc)
++ (if_then_else (gelt (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "peep2_regno_dead_p (2, REG_CC)"
++ [(parallel [(set (pc)
++ (if_then_else (<gelt_eqne> (zero_extract:HI (match_dup 0)
++ (const_int 1)
++ (match_dup 1))
++ (const_int 0))
++ (label_ref (match_dup 2))
++ (pc)))
+ (clobber (reg:CC REG_CC))])]
+- "operands[2] = gen_int_mode (-2147483647 - 1, SImode);")
++ {
++ operands[0] = avr_to_int_mode (operands[0]);
++ operands[1] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 1);
++ })
+
+-(define_peephole2
+- [(parallel [(set (reg:CC REG_CC) (compare:CC (match_operand:SI 0 "register_operand" "")
+- (const_int 0)))
+- (clobber (match_operand:SI 2 ""))])
+- (parallel [(set (pc) (if_then_else (lt (reg:CC REG_CC) (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
+- [(parallel [(set (pc) (if_then_else (ne (and:SI (match_dup 0) (match_dup 2))
+- (const_int 0))
+- (label_ref (match_dup 1))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- "operands[2] = gen_int_mode (-2147483647 - 1, SImode);")
+
+ ;; ************************************************************************
+ ;; Implementation of conditional jumps here.
+ ;; Compare with 0 (test) jumps
+ ;; ************************************************************************
+
+-(define_insn_and_split "branch"
++(define_insn "branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "simple_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_operand 0 "" ""))
++ [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_operand 0))
+ (pc)))]
+ "reload_completed"
+- "#"
+- "&& reload_completed"
+- [(parallel [(set (pc)
+- (if_then_else (match_op_dup 1
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_dup 0))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_insn "*branch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "simple_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_operand 0 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))]
+- "reload_completed"
+- {
+- return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+- }
+- [(set_attr "type" "branch")])
+-
+-
+-;; Same as above but wrap SET_SRC so that this branch won't be transformed
+-;; or optimized in the remainder.
+-
+-(define_insn "branch_unspec"
+- [(set (pc)
+- (unspec [(if_then_else (match_operator 1 "simple_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_operand 0 "" ""))
+- (pc))
+- ] UNSPEC_IDENTITY))
+- (clobber (reg:CC REG_CC))]
+- "reload_completed"
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch")])
+
+-;; ****************************************************************
+-;; AVR does not have following conditional jumps: LE,LEU,GT,GTU.
+-;; Convert them all to proper jumps.
+-;; ****************************************************************/
+
+-(define_insn_and_split "difficult_branch"
++(define_insn "difficult_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "difficult_comparison_operator"
+ [(reg:CC REG_CC)
+@@ -7042,95 +7059,11 @@ (define_insn_and_split "difficult_branch"
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "reload_completed"
+- "#"
+- "&& reload_completed"
+- [(parallel [(set (pc)
+- (if_then_else (match_op_dup 1
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_dup 0))
+- (pc)))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_insn "*difficult_branch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "difficult_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (label_ref (match_operand 0 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))]
+- "reload_completed"
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch1")])
+
+-;; revers branch
+-
+-(define_insn_and_split "rvbranch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "simple_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_operand 0 "" ""))))]
+- "reload_completed"
+- "#"
+- "&& reload_completed"
+- [(parallel [(set (pc)
+- (if_then_else (match_op_dup 1
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_dup 0))))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_insn "*rvbranch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "simple_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_operand 0 "" ""))))
+- (clobber (reg:CC REG_CC))]
+- "reload_completed"
+- {
+- return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);
+- }
+- [(set_attr "type" "branch1")])
+-
+-(define_insn_and_split "difficult_rvbranch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "difficult_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_operand 0 "" ""))))]
+- "reload_completed"
+- "#"
+- "&& reload_completed"
+- [(parallel [(set (pc)
+- (if_then_else (match_op_dup 1
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_dup 0))))
+- (clobber (reg:CC REG_CC))])])
+-
+-(define_insn "*difficult_rvbranch"
+- [(set (pc)
+- (if_then_else (match_operator 1 "difficult_comparison_operator"
+- [(reg:CC REG_CC)
+- (const_int 0)])
+- (pc)
+- (label_ref (match_operand 0 "" ""))))
+- (clobber (reg:CC REG_CC))]
+- "reload_completed"
+- {
+- return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);
+- }
+- [(set_attr "type" "branch")])
+
+ ;; **************************************************************************
+ ;; Unconditional and other jump instructions.
+@@ -7656,15 +7589,14 @@ (define_peephole ; "*dec-and-branchsi!=-1.d.clobber"
+ (clobber (reg:CC REG_CC))])
+ (parallel [(set (reg:CC REG_CC)
+ (compare:CC (match_dup 0)
+- (const_int -1)))
+- (clobber (match_operand:QI 1 "d_register_operand" ""))])
+- (parallel [(set (pc)
+- (if_then_else (eqne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 2 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
++ (const_int -1)))
++ (clobber (match_operand:QI 1 "scratch_or_d_register_operand"))])
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "dead_or_set_regno_p (insn, REG_CC)"
+ {
+ const char *op;
+ int jump_mode;
+@@ -7700,15 +7632,14 @@ (define_peephole ; "*dec-and-branchhi!=-1"
+ (clobber (reg:CC REG_CC))])
+ (parallel [(set (reg:CC REG_CC)
+ (compare:CC (match_dup 0)
+- (const_int -1)))
++ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+- (parallel [(set (pc)
+- (if_then_else (eqne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 2 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "dead_or_set_regno_p (insn, REG_CC)"
+ {
+ const char *op;
+ int jump_mode;
+@@ -7742,15 +7673,14 @@ (define_peephole ; "*dec-and-branchhi!=-1.d.clobber"
+ (clobber (reg:CC REG_CC))])
+ (parallel [(set (reg:CC REG_CC)
+ (compare:CC (match_dup 0)
+- (const_int -1)))
+- (clobber (match_operand:QI 1 "d_register_operand" ""))])
+- (parallel [(set (pc)
+- (if_then_else (eqne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 2 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
++ (const_int -1)))
++ (clobber (match_operand:QI 1 "scratch_or_d_register_operand"))])
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "dead_or_set_regno_p (insn, REG_CC)"
+ {
+ const char *op;
+ int jump_mode;
+@@ -7784,15 +7714,14 @@ (define_peephole ; "*dec-and-branchhi!=-1.l.clobber"
+ (clobber (reg:CC REG_CC))])
+ (parallel [(set (reg:CC REG_CC)
+ (compare:CC (match_dup 0)
+- (const_int -1)))
++ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+- (parallel [(set (pc)
+- (if_then_else (eqne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 2 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "dead_or_set_regno_p (insn, REG_CC)"
+ {
+ const char *op;
+ int jump_mode;
+@@ -7822,14 +7751,13 @@ (define_peephole ; "*dec-and-branchqi!=-1"
+ (clobber (reg:CC REG_CC))])
+ (set (reg:CC REG_CC)
+ (compare:CC (match_dup 0)
+- (const_int -1)))
+- (parallel [(set (pc)
+- (if_then_else (eqne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 1 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- ""
++ (const_int -1)))
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 1))
++ (pc)))]
++ "dead_or_set_regno_p (insn, REG_CC)"
+ {
+ const char *op;
+ int jump_mode;
+@@ -7855,14 +7783,14 @@ (define_peephole ; "*dec-and-branchqi!=-1"
+ (define_peephole ; "*cpse.eq"
+ [(set (reg:CC REG_CC)
+ (compare:CC (match_operand:ALL1 1 "register_operand" "r,r")
+- (match_operand:ALL1 2 "reg_or_0_operand" "r,Y00")))
+- (parallel [(set (pc)
+- (if_then_else (eq (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 0 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- "jump_over_one_insn_p (insn, operands[0])"
++ (match_operand:ALL1 2 "reg_or_0_operand" "r,Y00")))
++ (set (pc)
++ (if_then_else (eq (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 0))
++ (pc)))]
++ "jump_over_one_insn_p (insn, operands[0])
++ && dead_or_set_regno_p (insn, REG_CC)"
+ "@
+ cpse %1,%2
+ cpse %1,__zero_reg__")
+@@ -7890,16 +7818,16 @@ (define_peephole ; "*cpse.eq"
+
+ (define_peephole ; "*cpse.ne"
+ [(set (reg:CC REG_CC)
+- (compare:CC (match_operand:ALL1 1 "register_operand" "")
+- (match_operand:ALL1 2 "reg_or_0_operand" "")))
+- (parallel [(set (pc)
+- (if_then_else (ne (reg:CC REG_CC)
+- (const_int 0))
+- (label_ref (match_operand 0 "" ""))
+- (pc)))
+- (clobber (reg:CC REG_CC))])]
+- "!AVR_HAVE_JMP_CALL
+- || !TARGET_SKIP_BUG"
++ (compare:CC (match_operand:ALL1 1 "register_operand")
++ (match_operand:ALL1 2 "reg_or_0_operand")))
++ (set (pc)
++ (if_then_else (ne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 0))
++ (pc)))]
++ "(!AVR_HAVE_JMP_CALL
++ || !TARGET_SKIP_BUG)
++ && dead_or_set_regno_p (insn, REG_CC)"
+ {
+ if (operands[2] == CONST0_RTX (<MODE>mode))
+ operands[2] = zero_reg_rtx;
+@@ -8094,7 +8022,7 @@ (define_insn_and_split "delay_cycles_1"
+ (const_int 1)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_dup 1)
+- (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
++ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_dup 2))
+ (clobber (reg:CC REG_CC))])])
+
+@@ -8126,7 +8054,7 @@ (define_insn_and_split "delay_cycles_2"
+ (const_int 2)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_dup 1)
+- (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
++ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_dup 2))
+ (clobber (reg:CC REG_CC))])]
+ ""
+@@ -8163,7 +8091,7 @@ (define_insn_and_split "delay_cycles_3"
+ (const_int 3)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_dup 1)
+- (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
++ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+@@ -8206,7 +8134,7 @@ (define_insn_and_split "delay_cycles_4"
+ (const_int 4)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_dup 1)
+- (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
++ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+@@ -9095,16 +9023,20 @@ (define_insn "*movbitqi.1-6.b"
+ "bst %3,0\;bld %0,%4"
+ [(set_attr "length" "2")])
+
+-;; Move bit $3.0 into bit $0.0.
+-;; For bit 0, combiner generates slightly different pattern.
+-(define_insn "*movbitqi.0"
+- [(set (match_operand:QI 0 "register_operand" "=r")
+- (ior:QI (and:QI (match_operand:QI 1 "register_operand" "0")
+- (match_operand:QI 2 "single_zero_operand" "n"))
+- (and:QI (match_operand:QI 3 "register_operand" "r")
+- (const_int 1))))]
+- "0 == exact_log2 (~INTVAL(operands[2]) & GET_MODE_MASK (QImode))"
+- "bst %3,0\;bld %0,0"
++;; Move bit $3.x into bit $0.x.
++(define_insn "*movbit<mode>.0-6"
++ [(set (match_operand:QISI 0 "register_operand" "=r")
++ (ior:QISI (and:QISI (match_operand:QISI 1 "register_operand" "0")
++ (match_operand:QISI 2 "single_zero_operand" "n"))
++ (and:QISI (match_operand:QISI 3 "register_operand" "r")
++ (match_operand:QISI 4 "single_one_operand" "n"))))]
++ "GET_MODE_MASK(<MODE>mode)
++ == (GET_MODE_MASK(<MODE>mode) & (INTVAL(operands[2]) ^ INTVAL(operands[4])))"
++ {
++ auto bitmask = GET_MODE_MASK (<MODE>mode) & UINTVAL (operands[4]);
++ operands[4] = GEN_INT (exact_log2 (bitmask));
++ return "bst %T3%T4" CR_TAB "bld %T0%T4";
++ }
+ [(set_attr "length" "2")])
+
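
The generalized *movbit<mode>.0-6 pattern above matches the IOR-of-two-ANDs shape in which operand 2 clears exactly one bit and operand 4 selects that same bit, and emits a BST/BLD pair. At the source level this is the usual copy-one-bit idiom; a minimal C sketch (bit 3 and the function name are arbitrary choices for the example):

    #include <stdint.h>

    /* Copy bit 3 of src into bit 3 of dst, leaving the other bits of dst
       untouched: (dst & ~(1 << 3)) | (src & (1 << 3)) is exactly the
       single_zero_operand / single_one_operand pair that the insn condition
       requires to be complementary.  */
    uint8_t copy_bit3 (uint8_t dst, uint8_t src)
    {
      return (uint8_t) ((dst & (uint8_t) ~(1u << 3)) | (src & (1u << 3)));
    }
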
+ ;; Move bit $2.0 into bit $0.7.
+@@ -9489,6 +9421,258 @@ (define_peephole2
+ (clobber (reg:CC REG_CC))])])
+
+
++;; Try to optimize decrement-and-branch. When we have an addition followed
++;; by a comparison of the result against zero, we can output the addition
++;; in such a way that SREG.N and SREG.Z are set according to the result.
++
++;; { -1, +1 } for QImode, otherwise the empty set.
++(define_mode_attr p1m1 [(QI "N P")
++ (HI "Yxx") (PSI "Yxx") (SI "Yxx")])
++
++;; FIXME: reload1.cc::do_output_reload() does not support output reloads
++;; for JUMP_INSNs, hence letting combine do decrement-and-branch like
++;; the following might run into an ICE. Doing reloads by hand is too painful...
++;
++; (define_insn_and_split "*add.for.eqne.<mode>.cbranch"
++; [(set (pc)
++; (if_then_else (eqne (match_operand:QISI 1 "register_operand" "0")
++; (match_operand:QISI 2 "const_int_operand" "n"))
++; (label_ref (match_operand 4))
++; (pc)))
++; (set (match_operand:QISI 0 "register_operand" "=r")
++; (plus:QISI (match_dup 1)
++; (match_operand:QISI 3 "const_int_operand" "n")))]
++; ;; No clobber for now as combine might not have one handy.
++; ;; We pop a scratch in split1.
++; "!reload_completed
++; && const0_rtx == simplify_binary_operation (PLUS, <MODE>mode,
++; operands[2], operands[3])"
++; { gcc_unreachable(); }
++; "&& 1"
++; [(parallel [(set (pc)
++; (if_then_else (eqne (match_dup 1)
++; (match_dup 2))
++; (label_ref (match_dup 4))
++; (pc)))
++; (set (match_dup 0)
++; (plus:QISI (match_dup 1)
++; (match_dup 3)))
++; (clobber (scratch:QI))])])
++;
++;; ...Hence, stick with RTL peepholes for now. Unfortunately, there is no
++;; canonical form, and if reload shuffles registers around, we might miss
++;; opportunities to match a decrement-and-branch.
++;; doloop_end doesn't reload either, so it won't work for the same reason.
++
++(define_expand "gen_add_for_<code>_<mode>"
++ ; "*add.for.eqne.<mode>"
++ [(parallel [(set (reg:CC REG_CC)
++ (compare:CC (plus:QISI (match_operand:QISI 0 "register_operand")
++ (match_operand:QISI 1 "const_int_operand"))
++ (const_int 0)))
++ (set (match_dup 0)
++ (plus:QISI (match_dup 0)
++ (match_dup 1)))
++ (clobber (match_operand:QI 3))])
++ ; "branch"
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_dup 2))
++ (pc)))])
++
++
++;; 1/3: A version without clobber: d-reg or 8-bit adds +/-1.
++(define_peephole2
++ [(parallel [(set (match_operand:QISI 0 "register_operand")
++ (plus:QISI (match_dup 0)
++ (match_operand:QISI 1 "const_int_operand")))
++ (clobber (reg:CC REG_CC))])
++ (set (reg:CC REG_CC)
++ (compare:CC (match_dup 0)
++ (const_int 0)))
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "peep2_regno_dead_p (3, REG_CC)
++ && (d_register_operand (operands[0], <MODE>mode)
++ || (<MODE>mode == QImode
++ && (INTVAL (operands[1]) == 1
++ || INTVAL (operands[1]) == -1)))"
++ [(scratch)]
++ {
++ emit (gen_gen_add_for_<code>_<mode> (operands[0], operands[1], operands[2],
++ gen_rtx_SCRATCH (QImode)));
++ DONE;
++ })
++
++;; 2/3: A version with clobber from the insn.
++(define_peephole2
++ [(parallel [(set (match_operand:QISI 0 "register_operand")
++ (plus:QISI (match_dup 0)
++ (match_operand:QISI 1 "const_int_operand")))
++ (clobber (match_operand:QI 3 "scratch_or_d_register_operand"))
++ (clobber (reg:CC REG_CC))])
++ (parallel [(set (reg:CC REG_CC)
++ (compare:CC (match_dup 0)
++ (const_int 0)))
++ (clobber (match_operand:QI 4 "scratch_or_d_register_operand"))])
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "peep2_regno_dead_p (3, REG_CC)"
++ [(scratch)]
++ {
++ rtx scratch = REG_P (operands[3]) ? operands[3] : operands[4];
++
++ // We need either a d-register or a scratch register to clobber.
++ if (! REG_P (scratch)
++ && ! d_register_operand (operands[0], <MODE>mode)
++ && ! (QImode == <MODE>mode
++ && (INTVAL (operands[1]) == 1
++ || INTVAL (operands[1]) == -1)))
++ {
++ FAIL;
++ }
++ emit (gen_gen_add_for_<code>_<mode> (operands[0], operands[1], operands[2],
++ scratch));
++ DONE;
++ })
++
++;; 3/3: A version with a clobber from peephole2.
++(define_peephole2
++ [(match_scratch:QI 3 "d")
++ (parallel [(set (match_operand:QISI 0 "register_operand")
++ (plus:QISI (match_dup 0)
++ (match_operand:QISI 1 "const_int_operand")))
++ (clobber (reg:CC REG_CC))])
++ (set (reg:CC REG_CC)
++ (compare:CC (match_dup 0)
++ (const_int 0)))
++ (set (pc)
++ (if_then_else (eqne (reg:CC REG_CC)
++ (const_int 0))
++ (label_ref (match_operand 2))
++ (pc)))]
++ "peep2_regno_dead_p (3, REG_CC)"
++ [(scratch)]
++ {
++ emit (gen_gen_add_for_<code>_<mode> (operands[0], operands[1], operands[2],
++ operands[3]));
++ DONE;
++ })
++
++;; The result of the above three peepholes is an addition that also
++;; performs an EQ or NE comparison (of the result) against zero.
++;; FIXME: Using (match_dup 0) instead of operands[3/4] makes rnregs
++;; barf in regrename.cc::merge_overlapping_regs(). For now, use the
++;; fix from PR50788: Constrain as "0".
++(define_insn "*add.for.eqne.<mode>"
++ [(set (reg:CC REG_CC)
++ (compare:CC
++ (plus:QISI (match_operand:QISI 3 "register_operand" "0,0 ,0")
++ (match_operand:QISI 1 "const_int_operand" "n,<p1m1>,n"))
++ (const_int 0)))
++ (set (match_operand:QISI 0 "register_operand" "=d,*r ,r")
++ (plus:QISI (match_operand:QISI 4 "register_operand" "0,0 ,0")
++ (match_dup 1)))
++ (clobber (match_scratch:QI 2 "=X,X ,&d"))]
++ "reload_completed"
++ {
++ return avr_out_plus_set_ZN (operands, nullptr);
++ }
++ [(set_attr "adjust_len" "add_set_ZN")])
++
++
++;; Swapping both the comparison and the branch condition. This can turn
++;; difficult branches into easy ones, and in some cases a comparison
++;; against one can be turned into a comparison against zero.
++
++(define_peephole2 ; "*swapped_tst<mode>"
++ [(parallel [(set (reg:CC REG_CC)
++ (compare:CC (match_operand:ALLs234 1 "register_operand")
++ (match_operand:ALLs234 2 "const_operand")))
++ (clobber (match_operand:QI 3 "scratch_operand"))])
++ (set (pc)
++ (if_then_else (match_operator 0 "ordered_comparison_operator"
++ [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_operand 4))
++ (pc)))]
++ "peep2_regno_dead_p (2, REG_CC)"
++ [(set (reg:CC REG_CC)
++ (compare:CC (match_dup 2)
++ (match_dup 1)))
++ ; "branch"
++ (set (pc)
++ (if_then_else (match_op_dup 0 [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_dup 4))
++ (pc)))]
++ {
++ rtx xval = avr_to_int_mode (operands[2]);
++ enum rtx_code code = GET_CODE (operands[0]);
++
++ if (code == GT && xval == const0_rtx)
++ code = LT;
++ else if (code == GE && xval == const1_rtx)
++ code = LT;
++ else if (code == LE && xval == const0_rtx)
++ code = GE;
++ else if (code == LT && xval == const1_rtx)
++ code = GE;
++ else
++ FAIL;
++
++ operands[2] = CONST0_RTX (<MODE>mode);
++ PUT_CODE (operands[0], code);
++ })
++
++;; Same, but for 8-bit modes, which have no scratch reg.
++(define_peephole2 ; "*swapped_tst<mode>"
++ [(set (reg:CC REG_CC)
++ (compare:CC (match_operand:ALLs1 1 "register_operand")
++ (match_operand:ALLs1 2 "const_operand")))
++ (set (pc)
++ (if_then_else (match_operator 0 "ordered_comparison_operator"
++ [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_operand 4))
++ (pc)))]
++ "peep2_regno_dead_p (2, REG_CC)"
++ [(set (reg:CC REG_CC)
++ (compare:CC (match_dup 2)
++ (match_dup 1)))
++ ; "branch"
++ (set (pc)
++ (if_then_else (match_op_dup 0 [(reg:CC REG_CC)
++ (const_int 0)])
++ (label_ref (match_dup 4))
++ (pc)))]
++ {
++ rtx xval = avr_to_int_mode (operands[2]);
++ enum rtx_code code = GET_CODE (operands[0]);
++
++ if (code == GT && xval == const0_rtx)
++ code = LT;
++ else if (code == GE && xval == const1_rtx)
++ code = LT;
++ else if (code == LE && xval == const0_rtx)
++ code = GE;
++ else if (code == LT && xval == const1_rtx)
++ code = GE;
++ else
++ FAIL;
++
++ operands[2] = CONST0_RTX (<MODE>mode);
++ PUT_CODE (operands[0], code);
++ })
++
++
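
The swapped-test peepholes rely on ordinary signed-integer identities: x >= 1, x > 0 and 0 < x are the same condition, just as x <= 0, x < 1 and 0 >= x are. A small C sketch of a branch they can improve (classify and hit are illustrative names only):

    #include <stdint.h>

    extern void hit (void);    /* placeholder */

    /* For signed integers, x >= 1, x > 0 and 0 < x are the same condition.
       After the peephole the comparison operands are swapped and the
       constant becomes zero, so no constant other than zero is needed.  */
    void classify (int16_t x)
    {
      if (x >= 1)
        hit ();
    }
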
+ (define_expand "extzv"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (zero_extract:QI (match_operand:QI 1 "register_operand" "")
+--- a/src/gcc/config/avr/constraints.md
++++ b/src/gcc/config/avr/constraints.md
+@@ -245,6 +245,11 @@ (define_constraint "Ym2"
+ (match_test "INTVAL (avr_to_int_mode (op)) == -2"))
+ (match_test "satisfies_constraint_Cm2 (op)")))
+
++;; Constraint that's the empty set. Useful with mode and code iterators.
++(define_constraint "Yxx"
++ "A constraints that is always false"
++ (match_test "false"))
++
+ (define_constraint "Yx2"
+ "Fixed-point or integer constant not in the range @minus{}2 @dots{} 2"
+ (and (ior (match_code "const_int")
+--- a/src/gcc/config/avr/predicates.md
++++ b/src/gcc/config/avr/predicates.md
+@@ -27,6 +27,11 @@ (define_predicate "d_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) >= 16 && REGNO (op) <= 31")))
+
++(define_predicate "scratch_or_d_register_operand"
++ (ior (match_operand 0 "d_register_operand")
++ (and (match_code ("scratch"))
++ (match_operand 0 "scratch_operand"))))
++
+ (define_predicate "even_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+--- a/src/gcc/config/i386/cygwin.h
++++ b/src/gcc/config/i386/cygwin.h
+@@ -57,7 +57,7 @@ along with GCC; see the file COPYING3. If not see
+
+ #undef ENDFILE_SPEC
+ #define ENDFILE_SPEC \
+- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}\
++ "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+ %{!shared:%:if-exists(default-manifest.o%s)}\
+ %{fvtable-verify=none:%s; \
+ fvtable-verify=preinit:vtv_end.o%s; \
+--- a/src/gcc/config/i386/darwin.h
++++ b/src/gcc/config/i386/darwin.h
+@@ -109,8 +109,8 @@ along with GCC; see the file COPYING3. If not see
+ "%{!force_cpusubtype_ALL:-force_cpusubtype_ALL} "
+
+ #undef ENDFILE_SPEC
+-#define ENDFILE_SPEC \
+- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++#define ENDFILE_SPEC
++\ "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+ %{mpc32:crtprec32.o%s} \
+ %{mpc64:crtprec64.o%s} \
+ %{mpc80:crtprec80.o%s}" TM_DESTRUCTOR
+--- a/src/gcc/config/i386/gnu-user-common.h
++++ b/src/gcc/config/i386/gnu-user-common.h
+@@ -47,7 +47,7 @@ along with GCC; see the file COPYING3. If not see
+
+ /* Similar to standard GNU userspace, but adding -ffast-math support. */
+ #define GNU_USER_TARGET_MATHFILE_SPEC \
+- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++ "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+ %{mpc32:crtprec32.o%s} \
+ %{mpc64:crtprec64.o%s} \
+ %{mpc80:crtprec80.o%s}"
+--- a/src/gcc/config/i386/i386-builtins.cc
++++ b/src/gcc/config/i386/i386-builtins.cc
+@@ -1790,7 +1790,7 @@ ix86_vectorize_builtin_gather (const_tree mem_vectype,
+ ? !TARGET_USE_GATHER_2PARTS
+ : (known_eq (TYPE_VECTOR_SUBPARTS (mem_vectype), 4u)
+ ? !TARGET_USE_GATHER_4PARTS
+- : !TARGET_USE_GATHER)))
++ : !TARGET_USE_GATHER_8PARTS)))
+ return NULL_TREE;
+
+ if ((TREE_CODE (index_type) != INTEGER_TYPE
+--- a/src/gcc/config/i386/i386-features.cc
++++ b/src/gcc/config/i386/i386-features.cc
+@@ -1875,8 +1875,7 @@ public:
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+- return TARGET_AVX && TARGET_VZEROUPPER
+- && flag_expensive_optimizations && !optimize_size;
++ return TARGET_AVX && TARGET_VZEROUPPER;
+ }
+
+ virtual unsigned int execute (function *)
+--- a/src/gcc/config/i386/i386-options.cc
++++ b/src/gcc/config/i386/i386-options.cc
+@@ -137,6 +137,11 @@ along with GCC; see the file COPYING3. If not see
+ #define m_GOLDMONT_PLUS (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT_PLUS)
+ #define m_TREMONT (HOST_WIDE_INT_1U<<PROCESSOR_TREMONT)
+ #define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL)
++/* Gather Data Sampling / CVE-2022-40982 / INTEL-SA-00828.
++ Software mitigation. */
++#define m_GDS (m_SKYLAKE | m_SKYLAKE_AVX512 | m_CANNONLAKE \
++ | m_ICELAKE_CLIENT | m_ICELAKE_SERVER | m_CASCADELAKE \
++ | m_TIGERLAKE | m_COOPERLAKE | m_ROCKETLAKE)
+
+ #define m_GEODE (HOST_WIDE_INT_1U<<PROCESSOR_GEODE)
+ #define m_K6 (HOST_WIDE_INT_1U<<PROCESSOR_K6)
+@@ -1378,7 +1383,11 @@ ix86_valid_target_attribute_tree (tree fndecl, tree args,
+ if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
+ opts->x_ix86_tune_string
+ = ggc_strdup (option_strings[IX86_FUNCTION_SPECIFIC_TUNE]);
+- else if (orig_tune_defaulted)
++ /* If we have an explicit arch string but no tune string specified, set
++ tune_string to NULL; it will later be overridden by arch_string so
++ target clones can get proper optimization. */
++ else if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
++ || orig_tune_defaulted)
+ opts->x_ix86_tune_string = NULL;
+
+ /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
+@@ -1696,20 +1705,46 @@ parse_mtune_ctrl_str (struct gcc_options *opts, bool dump)
+ curr_feature_string++;
+ clear = true;
+ }
+- for (i = 0; i < X86_TUNE_LAST; i++)
+- {
+- if (!strcmp (curr_feature_string, ix86_tune_feature_names[i]))
+- {
+- ix86_tune_features[i] = !clear;
+- if (dump)
+- fprintf (stderr, "Explicitly %s feature %s\n",
+- clear ? "clear" : "set", ix86_tune_feature_names[i]);
+- break;
+- }
+- }
+- if (i == X86_TUNE_LAST)
+- error ("unknown parameter to option %<-mtune-ctrl%>: %s",
+- clear ? curr_feature_string - 1 : curr_feature_string);
++
++ if (!strcmp (curr_feature_string, "use_gather"))
++ {
++ ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS] = !clear;
++ ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS] = !clear;
++ ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS] = !clear;
++ if (dump)
++ fprintf (stderr, "Explicitly %s features use_gather_2parts,"
++ " use_gather_4parts, use_gather_8parts\n",
++ clear ? "clear" : "set");
++
++ }
++ else if (!strcmp (curr_feature_string, "use_scatter"))
++ {
++ ix86_tune_features[X86_TUNE_USE_SCATTER_2PARTS] = !clear;
++ ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS] = !clear;
++ ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS] = !clear;
++ if (dump)
++ fprintf (stderr, "Explicitly %s features use_scatter_2parts,"
++ " use_scatter_4parts, use_scatter_8parts\n",
++ clear ? "clear" : "set");
++ }
++ else
++ {
++ for (i = 0; i < X86_TUNE_LAST; i++)
++ {
++ if (!strcmp (curr_feature_string, ix86_tune_feature_names[i]))
++ {
++ ix86_tune_features[i] = !clear;
++ if (dump)
++ fprintf (stderr, "Explicitly %s feature %s\n",
++ clear ? "clear" : "set", ix86_tune_feature_names[i]);
++ break;
++ }
++ }
++
++ if (i == X86_TUNE_LAST)
++ error ("unknown parameter to option %<-mtune-ctrl%>: %s",
++ clear ? curr_feature_string - 1 : curr_feature_string);
++ }
+ curr_feature_string = next_feature_string;
+ }
+ while (curr_feature_string);
+@@ -2676,7 +2711,9 @@ ix86_option_override_internal (bool main_args_p,
+ sorry ("%<-mcall-ms2sysv-xlogues%> isn%'t currently supported with SEH");
+
+ if (!(opts_set->x_target_flags & MASK_VZEROUPPER)
+- && TARGET_EMIT_VZEROUPPER)
++ && TARGET_EMIT_VZEROUPPER
++ && flag_expensive_optimizations
++ && !optimize_size)
+ opts->x_target_flags |= MASK_VZEROUPPER;
+ if (!(opts_set->x_target_flags & MASK_STV))
+ opts->x_target_flags |= MASK_STV;
+--- a/src/gcc/config/i386/i386.cc
++++ b/src/gcc/config/i386/i386.cc
+@@ -12238,8 +12238,8 @@ output_pic_addr_const (FILE *file, rtx x, int code)
+ assemble_name (asm_out_file, buf);
+ break;
+
+- case CONST_INT:
+- fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
++ CASE_CONST_SCALAR_INT:
++ output_addr_const (file, x);
+ break;
+
+ case CONST:
+@@ -18396,8 +18396,10 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+ tree itype = GET_MODE_INNER (TYPE_MODE (type)) == E_SFmode
+ ? intSI_type_node : intDI_type_node;
+ type = get_same_sized_vectype (itype, type);
+- arg2 = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, arg2);
+ }
++ else
++ type = signed_type_for (type);
++ arg2 = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, arg2);
+ tree zero_vec = build_zero_cst (type);
+ tree cmp_type = truth_type_for (type);
+ tree cmp = gimple_build (&stmts, LT_EXPR, cmp_type, arg2, zero_vec);
+@@ -18935,7 +18937,7 @@ ix86_vectorize_builtin_scatter (const_tree vectype,
+ ? !TARGET_USE_SCATTER_2PARTS
+ : (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 4u)
+ ? !TARGET_USE_SCATTER_4PARTS
+- : !TARGET_USE_SCATTER))
++ : !TARGET_USE_SCATTER_8PARTS))
+ return NULL_TREE;
+
+ if ((TREE_CODE (index_type) != INTEGER_TYPE
+--- a/src/gcc/config/i386/i386.h
++++ b/src/gcc/config/i386/i386.h
+@@ -398,10 +398,10 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST];
+ ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS]
+ #define TARGET_USE_SCATTER_4PARTS \
+ ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS]
+-#define TARGET_USE_GATHER \
+- ix86_tune_features[X86_TUNE_USE_GATHER]
+-#define TARGET_USE_SCATTER \
+- ix86_tune_features[X86_TUNE_USE_SCATTER]
++#define TARGET_USE_GATHER_8PARTS \
++ ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS]
++#define TARGET_USE_SCATTER_8PARTS \
++ ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS]
+ #define TARGET_FUSE_CMP_AND_BRANCH_32 \
+ ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
+ #define TARGET_FUSE_CMP_AND_BRANCH_64 \
+--- a/src/gcc/config/i386/i386.opt
++++ b/src/gcc/config/i386/i386.opt
+@@ -420,6 +420,10 @@ mpc80
+ Target RejectNegative
+ Set 80387 floating-point precision to 80-bit.
+
++mdaz-ftz
++Target
++Set the FTZ and DAZ Flags.
++
+ mpreferred-stack-boundary=
+ Target RejectNegative Joined UInteger Var(ix86_preferred_stack_boundary_arg)
+ Attempt to keep stack aligned to this power of 2.
+@@ -1214,3 +1218,11 @@ Do not use GOT to access external symbols.
+ -param=x86-stlf-window-ninsns=
+ Target Joined UInteger Var(x86_stlf_window_ninsns) Init(64) Param
+ Instructions number above which STFL stall penalty can be compensated.
++
++mgather
++Target Alias(mtune-ctrl=, use_gather, ^use_gather)
++Enable vectorization for gather instructions.
++
++mscatter
++Target Alias(mtune-ctrl=, use_scatter, ^use_scatter)
++Enable vectorization for scatter instructions.
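
The new -mdaz-ftz option (and, unless -mno-daz-ftz is given, -Ofast/-ffast-math/-funsafe-math-optimizations) makes the driver link crtfastmath.o, whose startup code sets the FTZ and DAZ bits in MXCSR. A rough C sketch of that effect, using the SSE control-mode intrinsics rather than the actual crtfastmath source:

    #include <xmmintrin.h>   /* _MM_SET_FLUSH_ZERO_MODE */
    #include <pmmintrin.h>   /* _MM_SET_DENORMALS_ZERO_MODE */

    /* Roughly what linking crtfastmath.o arranges to happen before main():
       denormal results are flushed to zero (FTZ) and denormal inputs are
       treated as zero (DAZ), trading IEEE conformance for speed.  */
    static void __attribute__ ((constructor))
    set_daz_ftz (void)
    {
      _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
      _MM_SET_DENORMALS_ZERO_MODE (_MM_DENORMALS_ZERO_ON);
    }
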
+--- a/src/gcc/config/i386/mingw32.h
++++ b/src/gcc/config/i386/mingw32.h
+@@ -197,7 +197,7 @@ along with GCC; see the file COPYING3. If not see
+
+ #undef ENDFILE_SPEC
+ #define ENDFILE_SPEC \
+- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++ "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+ %{!shared:%:if-exists(default-manifest.o%s)}\
+ %{fvtable-verify=none:%s; \
+ fvtable-verify=preinit:vtv_end.o%s; \
+--- a/src/gcc/config/i386/sse.md
++++ b/src/gcc/config/i386/sse.md
+@@ -1411,12 +1411,12 @@ (define_expand "<avx512>_load<mode>_mask"
+ })
+
+ (define_insn "*<avx512>_load<mode>_mask"
+- [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
+- (vec_merge:VI12_AVX512VL
+- (unspec:VI12_AVX512VL
+- [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")]
++ [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v")
++ (vec_merge:VI12HF_AVX512VL
++ (unspec:VI12HF_AVX512VL
++ [(match_operand:VI12HF_AVX512VL 1 "memory_operand" "m")]
+ UNSPEC_MASKLOAD)
+- (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand" "0C")
++ (match_operand:VI12HF_AVX512VL 2 "nonimm_or_0_operand" "0C")
+ (match_operand:<avx512fmaskmode> 3 "register_operand" "Yk")))]
+ "TARGET_AVX512BW"
+ "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
+@@ -1425,9 +1425,9 @@ (define_insn "*<avx512>_load<mode>_mask"
+ (set_attr "mode" "<sseinsnmode>")])
+
+ (define_insn_and_split "*<avx512>_load<mode>"
+- [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
+- (unspec:VI12_AVX512VL
+- [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")]
++ [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v")
++ (unspec:VI12HF_AVX512VL
++ [(match_operand:VI12HF_AVX512VL 1 "memory_operand" "m")]
+ UNSPEC_MASKLOAD))]
+ "TARGET_AVX512BW"
+ "#"
+@@ -1554,7 +1554,7 @@ (define_insn "<avx512>_blendm<mode>"
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+-(define_insn "<avx512>_store<mode>_mask"
++(define_insn "*<avx512>_store<mode>_mask"
+ [(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
+ (vec_merge:V48_AVX512VL
+ (match_operand:V48_AVX512VL 1 "register_operand" "v")
+@@ -1582,7 +1582,7 @@ (define_insn "<avx512>_store<mode>_mask"
+ (set_attr "memory" "store")
+ (set_attr "mode" "<sseinsnmode>")])
+
+-(define_insn "<avx512>_store<mode>_mask"
++(define_insn "*<avx512>_store<mode>_mask"
+ [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand" "=m")
+ (vec_merge:VI12HF_AVX512VL
+ (match_operand:VI12HF_AVX512VL 1 "register_operand" "v")
+@@ -6318,6 +6318,14 @@ (define_int_attr complexpairopname
+ [(UNSPEC_COMPLEX_FMA_PAIR "fmaddc")
+ (UNSPEC_COMPLEX_FCMA_PAIR "fcmaddc")])
+
++(define_int_attr int_comm
++ [(UNSPEC_COMPLEX_FMA "")
++ (UNSPEC_COMPLEX_FMA_PAIR "")
++ (UNSPEC_COMPLEX_FCMA "")
++ (UNSPEC_COMPLEX_FCMA_PAIR "")
++ (UNSPEC_COMPLEX_FMUL "%")
++ (UNSPEC_COMPLEX_FCMUL "")])
++
+ (define_int_attr conj_op
+ [(UNSPEC_COMPLEX_FMA "")
+ (UNSPEC_COMPLEX_FCMA "_conj")
+@@ -6431,7 +6439,7 @@ (define_expand "cmla<conj_op><mode>4"
+ (define_insn "fma_<complexopname>_<mode><sdc_maskz_name><round_name>"
+ [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ (unspec:VF_AVX512FP16VL
+- [(match_operand:VF_AVX512FP16VL 1 "<round_nimm_predicate>" "%v")
++ [(match_operand:VF_AVX512FP16VL 1 "<round_nimm_predicate>" "<int_comm>v")
+ (match_operand:VF_AVX512FP16VL 2 "<round_nimm_predicate>" "<round_constraint>")
+ (match_operand:VF_AVX512FP16VL 3 "<round_nimm_predicate>" "0")]
+ UNSPEC_COMPLEX_F_C_MA))]
+@@ -6495,7 +6503,7 @@ (define_insn_and_split "fma_<complexopname>_<mode>_fma_zero"
+ (define_insn "fma_<complexpairopname>_<mode>_pair"
+ [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=&v")
+ (unspec:VF1_AVX512VL
+- [(match_operand:VF1_AVX512VL 1 "vector_operand" "%v")
++ [(match_operand:VF1_AVX512VL 1 "vector_operand" "<int_comm>v")
+ (match_operand:VF1_AVX512VL 2 "bcst_vector_operand" "vmBr")
+ (match_operand:VF1_AVX512VL 3 "vector_operand" "0")]
+ UNSPEC_COMPLEX_F_C_MA_PAIR))]
+@@ -6562,7 +6570,7 @@ (define_insn "<avx512>_<complexopname>_<mode>_mask<round_name>"
+ [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ (vec_merge:VF_AVX512FP16VL
+ (unspec:VF_AVX512FP16VL
+- [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v")
++ [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "<int_comm>v")
+ (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "<round_constraint>")
+ (match_operand:VF_AVX512FP16VL 3 "register_operand" "0")]
+ UNSPEC_COMPLEX_F_C_MA)
+@@ -6586,7 +6594,7 @@ (define_expand "cmul<conj_op><mode>3"
+ (define_insn "<avx512>_<complexopname>_<mode><maskc_name><round_name>"
+ [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ (unspec:VF_AVX512FP16VL
+- [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v")
++ [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "<int_comm>v")
+ (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "<round_constraint>")]
+ UNSPEC_COMPLEX_F_C_MUL))]
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+@@ -25973,17 +25981,21 @@ (define_expand "maskload<mode><sseintvecmodelower>"
+ "TARGET_AVX")
+
+ (define_expand "maskload<mode><avx512fmaskmodelower>"
+- [(set (match_operand:V48H_AVX512VL 0 "register_operand")
+- (vec_merge:V48H_AVX512VL
+- (match_operand:V48H_AVX512VL 1 "memory_operand")
++ [(set (match_operand:V48_AVX512VL 0 "register_operand")
++ (vec_merge:V48_AVX512VL
++ (unspec:V48_AVX512VL
++ [(match_operand:V48_AVX512VL 1 "memory_operand")]
++ UNSPEC_MASKLOAD)
+ (match_dup 0)
+ (match_operand:<avx512fmaskmode> 2 "register_operand")))]
+ "TARGET_AVX512F")
+
+ (define_expand "maskload<mode><avx512fmaskmodelower>"
+- [(set (match_operand:VI12_AVX512VL 0 "register_operand")
+- (vec_merge:VI12_AVX512VL
+- (match_operand:VI12_AVX512VL 1 "memory_operand")
++ [(set (match_operand:VI12HF_AVX512VL 0 "register_operand")
++ (vec_merge:VI12HF_AVX512VL
++ (unspec:VI12HF_AVX512VL
++ [(match_operand:VI12HF_AVX512VL 1 "memory_operand")]
++ UNSPEC_MASKLOAD)
+ (match_dup 0)
+ (match_operand:<avx512fmaskmode> 2 "register_operand")))]
+ "TARGET_AVX512BW")
+@@ -25998,21 +26010,66 @@ (define_expand "maskstore<mode><sseintvecmodelower>"
+ "TARGET_AVX")
+
+ (define_expand "maskstore<mode><avx512fmaskmodelower>"
+- [(set (match_operand:V48H_AVX512VL 0 "memory_operand")
+- (vec_merge:V48H_AVX512VL
+- (match_operand:V48H_AVX512VL 1 "register_operand")
+- (match_dup 0)
+- (match_operand:<avx512fmaskmode> 2 "register_operand")))]
++ [(set (match_operand:V48_AVX512VL 0 "memory_operand")
++ (unspec:V48_AVX512VL
++ [(match_operand:V48_AVX512VL 1 "register_operand")
++ (match_dup 0)
++ (match_operand:<avx512fmaskmode> 2 "register_operand")]
++ UNSPEC_MASKMOV))]
+ "TARGET_AVX512F")
+
+ (define_expand "maskstore<mode><avx512fmaskmodelower>"
+- [(set (match_operand:VI12_AVX512VL 0 "memory_operand")
+- (vec_merge:VI12_AVX512VL
+- (match_operand:VI12_AVX512VL 1 "register_operand")
+- (match_dup 0)
+- (match_operand:<avx512fmaskmode> 2 "register_operand")))]
++ [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand")
++ (unspec:VI12HF_AVX512VL
++ [(match_operand:VI12HF_AVX512VL 1 "register_operand")
++ (match_dup 0)
++ (match_operand:<avx512fmaskmode> 2 "register_operand")]
++ UNSPEC_MASKMOV))]
+ "TARGET_AVX512BW")
+
++(define_insn "<avx512>_store<mode>_mask"
++ [(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
++ (unspec:V48_AVX512VL
++ [(match_operand:V48_AVX512VL 1 "register_operand" "v")
++ (match_dup 0)
++ (match_operand:<avx512fmaskmode> 2 "register_operand" "Yk")]
++ UNSPEC_MASKMOV))]
++ "TARGET_AVX512F"
++{
++ if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
++ {
++ if (misaligned_operand (operands[0], <MODE>mode))
++ return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ else
++ return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ }
++ else
++ {
++ if (misaligned_operand (operands[0], <MODE>mode))
++ return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ else
++ return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ }
++}
++ [(set_attr "type" "ssemov")
++ (set_attr "prefix" "evex")
++ (set_attr "memory" "store")
++ (set_attr "mode" "<sseinsnmode>")])
++
++(define_insn "<avx512>_store<mode>_mask"
++ [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand" "=m")
++ (unspec:VI12HF_AVX512VL
++ [(match_operand:VI12HF_AVX512VL 1 "register_operand" "v")
++ (match_dup 0)
++ (match_operand:<avx512fmaskmode> 2 "register_operand" "Yk")]
++ UNSPEC_MASKMOV))]
++ "TARGET_AVX512BW"
++ "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}"
++ [(set_attr "type" "ssemov")
++ (set_attr "prefix" "evex")
++ (set_attr "memory" "store")
++ (set_attr "mode" "<sseinsnmode>")])
++
+ (define_expand "cbranch<mode>4"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:VI48_AVX 1 "register_operand")
+--- a/src/gcc/config/i386/x86-tune.def
++++ b/src/gcc/config/i386/x86-tune.def
+@@ -467,7 +467,8 @@ DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
+ /* X86_TUNE_USE_GATHER_2PARTS: Use gather instructions for vectors with 2
+ elements. */
+ DEF_TUNE (X86_TUNE_USE_GATHER_2PARTS, "use_gather_2parts",
+- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE | m_GENERIC))
++ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE
++ | m_GENERIC | m_GDS))
+
+ /* X86_TUNE_USE_SCATTER_2PARTS: Use scater instructions for vectors with 2
+ elements. */
+@@ -477,7 +478,8 @@ DEF_TUNE (X86_TUNE_USE_SCATTER_2PARTS, "use_scatter_2parts",
+ /* X86_TUNE_USE_GATHER_4PARTS: Use gather instructions for vectors with 4
+ elements. */
+ DEF_TUNE (X86_TUNE_USE_GATHER_4PARTS, "use_gather_4parts",
+- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE | m_GENERIC))
++ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ALDERLAKE
++ | m_GENERIC | m_GDS))
+
+ /* X86_TUNE_USE_SCATTER_4PARTS: Use scater instructions for vectors with 4
+ elements. */
+@@ -486,12 +488,13 @@ DEF_TUNE (X86_TUNE_USE_SCATTER_4PARTS, "use_scatter_4parts",
+
+ /* X86_TUNE_USE_GATHER: Use gather instructions for vectors with 8 or more
+ elements. */
+-DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
+- ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER4 | m_ALDERLAKE | m_GENERIC))
++DEF_TUNE (X86_TUNE_USE_GATHER_8PARTS, "use_gather_8parts",
++ ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER4 | m_ALDERLAKE
++ | m_GENERIC | m_GDS))
+
+ /* X86_TUNE_USE_SCATTER: Use scater instructions for vectors with 8 or more
+ elements. */
+-DEF_TUNE (X86_TUNE_USE_SCATTER, "use_scatter",
++DEF_TUNE (X86_TUNE_USE_SCATTER_8PARTS, "use_scatter_8parts",
+ ~(m_ZNVER4))
+
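
The m_GDS additions disable gather generation by default on the CPUs affected by Gather Data Sampling, and the new -mgather/-mscatter aliases for -mtune-ctrl=use_gather/use_scatter let users opt back in. The kind of loop affected is an indexed load that the vectorizer might otherwise implement with a hardware gather; a small illustration (whether a gather is actually emitted depends on the selected -march/-mtune and on vectorizer cost decisions):

    /* An indexed (gather-style) load: the vectorizer may implement
       in[idx[i]] with a hardware gather.  On the m_GDS cores this is now
       avoided by default; -mgather re-enables it.  */
    void scale (float *out, const float *in, const int *idx, int n)
    {
      for (int i = 0; i < n; i++)
        out[i] = 2.0f * in[idx[i]];
    }
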
+ /* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or
+--- a/src/gcc/config/loongarch/loongarch.cc
++++ b/src/gcc/config/loongarch/loongarch.cc
+@@ -1098,7 +1098,9 @@ loongarch_first_stack_step (struct loongarch_frame_info *frame)
+ static void
+ loongarch_emit_stack_tie (void)
+ {
+- emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx, hard_frame_pointer_rtx));
++ emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx,
++ frame_pointer_needed ? hard_frame_pointer_rtx
++ : stack_pointer_rtx));
+ }
+
+ #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+--- a/src/gcc/config/loongarch/loongarch.md
++++ b/src/gcc/config/loongarch/loongarch.md
+@@ -2622,6 +2622,10 @@ (define_insn "*jump_pic"
+ }
+ [(set_attr "type" "branch")])
+
++;; The micro-architecture unconditionally treats a "jr $ra" as "return from
++;; subroutine", so non-returning indirect jumps through $ra would interfere
++;; with both subroutine return prediction and the more general indirect
++;; branch prediction.
++
+ (define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand"))]
+ ""
+@@ -2632,7 +2636,7 @@ (define_expand "indirect_jump"
+ })
+
+ (define_insn "@indirect_jump<mode>"
+- [(set (pc) (match_operand:P 0 "register_operand" "r"))]
++ [(set (pc) (match_operand:P 0 "register_operand" "e"))]
+ ""
+ "jr\t%0"
+ [(set_attr "type" "jump")
+@@ -2655,7 +2659,7 @@ (define_expand "tablejump"
+
+ (define_insn "@tablejump<mode>"
+ [(set (pc)
+- (match_operand:P 0 "register_operand" "r"))
++ (match_operand:P 0 "register_operand" "e"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jr\t%0"
+--- a/src/gcc/config/loongarch/t-loongarch
++++ b/src/gcc/config/loongarch/t-loongarch
+@@ -16,6 +16,10 @@
+ # along with GCC; see the file COPYING3. If not see
+ # <http://www.gnu.org/licenses/>.
+
++TM_H += $(srcdir)/config/loongarch/loongarch-driver.h
++OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \
++ $(srcdir)/config/loongarch/loongarch-tune.h
++
+ # Canonical target triplet from config.gcc
+ LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\
+ $(filter LA_MULTIARCH_TRIPLET=%,$(tm_defines)))
+--- a/src/gcc/config/riscv/riscv.cc
++++ b/src/gcc/config/riscv/riscv.cc
+@@ -5600,9 +5600,8 @@ riscv_asan_shadow_offset (void)
+ /* We only have libsanitizer support for RV64 at present.
+
+ This number must match kRiscv*_ShadowOffset* in the file
+- libsanitizer/asan/asan_mapping.h which is currently 1<<29 for rv64,
+- even though 1<<36 makes more sense. */
+- return TARGET_64BIT ? (HOST_WIDE_INT_1 << 29) : 0;
++ libsanitizer/asan/asan_mapping.h. */
++ return TARGET_64BIT ? HOST_WIDE_INT_UC (0xd55550000) : 0;
+ }
+
+ /* Initialize the GCC target structure. */
+--- a/src/gcc/config/rs6000/altivec.md
++++ b/src/gcc/config/rs6000/altivec.md
+@@ -385,14 +385,22 @@ (define_split
+
+ (define_insn_and_split "sldoi_to_mov<mode>"
+ [(set (match_operand:VM 0 "altivec_register_operand")
+- (unspec:VM [(match_operand:VM 1 "easy_vector_constant")
++ (unspec:VM [(match_operand:VM 1 "const_vector_each_byte_same")
+ (match_dup 1)
+ (match_operand:QI 2 "u5bit_cint_operand")]
+ UNSPEC_VSLDOI))]
+- "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
++ "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
+ "#"
+ "&& 1"
+- [(set (match_dup 0) (match_dup 1))])
++ [(set (match_dup 0) (match_dup 1))]
++ "{
++ if (!easy_vector_constant (operands[1], <MODE>mode))
++ {
++ rtx dest = gen_reg_rtx (<MODE>mode);
++ emit_move_insn (dest, operands[1]);
++ operands[1] = dest;
++ }
++ }")
+
+ (define_insn "get_vrsave_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+--- a/src/gcc/config/rs6000/fusion.md
++++ b/src/gcc/config/rs6000/fusion.md
+@@ -22,7 +22,7 @@
+ ;; load mode is DI result mode is clobber compare mode is CC extend is none
+ (define_insn_and_split "*ld_cmpdi_cr0_DI_clobber_CC_none"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x")
+- (compare:CC (match_operand:DI 1 "ds_form_mem_operand" "m")
++ (compare:CC (match_operand:DI 1 "non_update_memory_operand" "YZ")
+ (match_operand:DI 3 "const_m1_to_1_operand" "n")))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "(TARGET_P10_FUSION)"
+@@ -43,7 +43,7 @@ (define_insn_and_split "*ld_cmpdi_cr0_DI_clobber_CC_none"
+ ;; load mode is DI result mode is clobber compare mode is CCUNS extend is none
+ (define_insn_and_split "*ld_cmpldi_cr0_DI_clobber_CCUNS_none"
+ [(set (match_operand:CCUNS 2 "cc_reg_operand" "=x")
+- (compare:CCUNS (match_operand:DI 1 "ds_form_mem_operand" "m")
++ (compare:CCUNS (match_operand:DI 1 "non_update_memory_operand" "YZ")
+ (match_operand:DI 3 "const_0_to_1_operand" "n")))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "(TARGET_P10_FUSION)"
+@@ -64,7 +64,7 @@ (define_insn_and_split "*ld_cmpldi_cr0_DI_clobber_CCUNS_none"
+ ;; load mode is DI result mode is DI compare mode is CC extend is none
+ (define_insn_and_split "*ld_cmpdi_cr0_DI_DI_CC_none"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x")
+- (compare:CC (match_operand:DI 1 "ds_form_mem_operand" "m")
++ (compare:CC (match_operand:DI 1 "non_update_memory_operand" "YZ")
+ (match_operand:DI 3 "const_m1_to_1_operand" "n")))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r") (match_dup 1))]
+ "(TARGET_P10_FUSION)"
+@@ -85,7 +85,7 @@ (define_insn_and_split "*ld_cmpdi_cr0_DI_DI_CC_none"
+ ;; load mode is DI result mode is DI compare mode is CCUNS extend is none
+ (define_insn_and_split "*ld_cmpldi_cr0_DI_DI_CCUNS_none"
+ [(set (match_operand:CCUNS 2 "cc_reg_operand" "=x")
+- (compare:CCUNS (match_operand:DI 1 "ds_form_mem_operand" "m")
++ (compare:CCUNS (match_operand:DI 1 "non_update_memory_operand" "YZ")
+ (match_operand:DI 3 "const_0_to_1_operand" "n")))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r") (match_dup 1))]
+ "(TARGET_P10_FUSION)"
+@@ -104,17 +104,17 @@ (define_insn_and_split "*ld_cmpldi_cr0_DI_DI_CCUNS_none"
+
+ ;; load-cmpi fusion pattern generated by gen_ld_cmpi_p10
+ ;; load mode is SI result mode is clobber compare mode is CC extend is none
+-(define_insn_and_split "*lwa_cmpdi_cr0_SI_clobber_CC_none"
++(define_insn_and_split "*lwz_cmpwi_cr0_SI_clobber_CC_none"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x")
+- (compare:CC (match_operand:SI 1 "ds_form_mem_operand" "m")
++ (compare:CC (match_operand:SI 1 "non_update_memory_operand" "m")
+ (match_operand:SI 3 "const_m1_to_1_operand" "n")))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "(TARGET_P10_FUSION)"
+- "lwa%X1 %0,%1\;cmpdi %2,%0,%3"
++ "lwz%X1 %0,%1\;cmpwi %2,%0,%3"
+ "&& reload_completed
+ && (cc_reg_not_cr0_operand (operands[2], CCmode)
+ || !address_is_non_pfx_d_or_x (XEXP (operands[1], 0),
+- SImode, NON_PREFIXED_DS))"
++ SImode, NON_PREFIXED_D))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0) (match_dup 3)))]
+@@ -146,17 +146,17 @@ (define_insn_and_split "*lwz_cmpldi_cr0_SI_clobber_CCUNS_none"
+
+ ;; load-cmpi fusion pattern generated by gen_ld_cmpi_p10
+ ;; load mode is SI result mode is SI compare mode is CC extend is none
+-(define_insn_and_split "*lwa_cmpdi_cr0_SI_SI_CC_none"
++(define_insn_and_split "*lwz_cmpwi_cr0_SI_SI_CC_none"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x")
+- (compare:CC (match_operand:SI 1 "ds_form_mem_operand" "m")
++ (compare:CC (match_operand:SI 1 "non_update_memory_operand" "m")
+ (match_operand:SI 3 "const_m1_to_1_operand" "n")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r") (match_dup 1))]
+ "(TARGET_P10_FUSION)"
+- "lwa%X1 %0,%1\;cmpdi %2,%0,%3"
++ "lwz%X1 %0,%1\;cmpwi %2,%0,%3"
+ "&& reload_completed
+ && (cc_reg_not_cr0_operand (operands[2], CCmode)
+ || !address_is_non_pfx_d_or_x (XEXP (operands[1], 0),
+- SImode, NON_PREFIXED_DS))"
++ SImode, NON_PREFIXED_D))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0) (match_dup 3)))]
+@@ -190,7 +190,7 @@ (define_insn_and_split "*lwz_cmpldi_cr0_SI_SI_CCUNS_none"
+ ;; load mode is SI result mode is EXTSI compare mode is CC extend is sign
+ (define_insn_and_split "*lwa_cmpdi_cr0_SI_EXTSI_CC_sign"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x")
+- (compare:CC (match_operand:SI 1 "ds_form_mem_operand" "m")
++ (compare:CC (match_operand:SI 1 "non_update_memory_operand" "YZ")
+ (match_operand:SI 3 "const_m1_to_1_operand" "n")))
+ (set (match_operand:EXTSI 0 "gpc_reg_operand" "=r") (sign_extend:EXTSI (match_dup 1)))]
+ "(TARGET_P10_FUSION)"
+@@ -205,6 +205,7 @@ (define_insn_and_split "*lwa_cmpdi_cr0_SI_EXTSI_CC_sign"
+ ""
+ [(set_attr "type" "fused_load_cmpi")
+ (set_attr "cost" "8")
++ (set_attr "sign_extend" "yes")
+ (set_attr "length" "8")])
+
+ ;; load-cmpi fusion pattern generated by gen_ld_cmpi_p10
+--- a/src/gcc/config/rs6000/genfusion.pl
++++ b/src/gcc/config/rs6000/genfusion.pl
+@@ -53,92 +53,136 @@ sub mode_to_ldst_char
+ return '?';
+ }
+
++sub gen_ld_cmpi_p10_one
++{
++ my ($lmode, $result, $ccmode) = @_;
++
++ my $np = "NON_PREFIXED_D";
++ my $mempred = "non_update_memory_operand";
++ my $extend;
++
++ # We need to special case lwa. The prefixed_load_p function in rs6000.cc
++ # (which determines if a load instruction is prefixed) uses the fact that the
++ # register mode is different from the memory mode, and that the sign_extend
++ # attribute is set to use DS-form rules for the address instead of D-form.
++ # If the register size is the same, prefixed_load_p assumes we are doing an
++ # lwz. We change to use an lwz and a word compare if we don't need to sign
++ # extend the SImode value. Otherwise, if we do need the sign-extended value,
++ # we have to make sure the insn is marked as DS-form.
++ my $cmp_size_char = ($lmode eq "SI"
++ && $ccmode eq "CC"
++ && $result !~ /^EXT|^DI$/) ? "w" : "d";
++
++ if ($ccmode eq "CC") {
++ # ld and lwa are both DS-FORM.
++ ($lmode eq "DI") and $np = "NON_PREFIXED_DS";
++ ($lmode eq "SI" && $cmp_size_char eq "d") and $np = "NON_PREFIXED_DS";
++ } else {
++ if ($lmode eq "DI") {
++ # ld is DS-form, but lwz is not.
++ $np = "NON_PREFIXED_DS";
++ }
++ }
++
++ my $cmpl = ($ccmode eq "CC") ? "" : "l";
++ my $echr = ($ccmode eq "CC" && $cmp_size_char eq "d") ? "a" : "z";
++ if ($lmode eq "DI") { $echr = ""; }
++ my $constpred = ($ccmode eq "CC") ? "const_m1_to_1_operand"
++ : "const_0_to_1_operand";
++
++ # For clobber, we need a SI/DI reg in case we
++ # split because we have to sign/zero extend.
++ my $clobbermode = ($lmode =~ /^[QH]I$/) ? "GPR" : $lmode;
++ if ($result =~ /^EXT/ || $result eq "GPR" || $clobbermode eq "GPR") {
++ # We always need extension if result > lmode.
++ $extend = ($ccmode eq "CC") ? "sign" : "zero";
++ } else {
++ # Result of SI/DI does not need sign extension.
++ $extend = "none";
++ }
++
++ my $ldst = mode_to_ldst_char($lmode);
++
++ # DS-form addresses need YZ, and not m.
++ my $constraint = ($np eq "NON_PREFIXED_DS") ? "YZ" : "m";
++ print <<HERE;
++;; load-cmpi fusion pattern generated by gen_ld_cmpi_p10
++;; load mode is $lmode result mode is $result compare mode is $ccmode extend is $extend
++(define_insn_and_split "*l${ldst}${echr}_cmp${cmpl}${cmp_size_char}i_cr0_${lmode}_${result}_${ccmode}_${extend}"
++ [(set (match_operand:${ccmode} 2 "cc_reg_operand" "=x")
++ (compare:${ccmode} (match_operand:${lmode} 1 "${mempred}" "${constraint}")
++HERE
++ print " " if $ccmode eq "CCUNS";
++print <<HERE;
++ (match_operand:${lmode} 3 "${constpred}" "n")))
++HERE
++
++ if ($result eq "clobber") {
++ print <<HERE;
++ (clobber (match_scratch:${clobbermode} 0 "=r"))]
++HERE
++ } elsif ($result eq $lmode) {
++ print <<HERE;
++ (set (match_operand:${result} 0 "gpc_reg_operand" "=r") (match_dup 1))]
++HERE
++ } else {
++ print <<HERE;
++ (set (match_operand:${result} 0 "gpc_reg_operand" "=r") (${extend}_extend:${result} (match_dup 1)))]
++HERE
++ }
++
++ print <<HERE;
++ "(TARGET_P10_FUSION)"
++ "l${ldst}${echr}%X1 %0,%1\\;cmp${cmpl}${cmp_size_char}i %2,%0,%3"
++ "&& reload_completed
++ && (cc_reg_not_cr0_operand (operands[2], CCmode)
++ || !address_is_non_pfx_d_or_x (XEXP (operands[1], 0),
++ ${lmode}mode, ${np}))"
++HERE
++
++ if ($extend eq "none") {
++ print " [(set (match_dup 0) (match_dup 1))\n";
++ } elsif ($result eq "clobber") {
++ print " [(set (match_dup 0) (${extend}_extend:${clobbermode} (match_dup 1)))\n";
++ } else {
++ print " [(set (match_dup 0) (${extend}_extend:${result} (match_dup 1)))\n";
++ }
++
++ print <<HERE;
++ (set (match_dup 2)
++ (compare:${ccmode} (match_dup 0) (match_dup 3)))]
++ ""
++ [(set_attr "type" "fused_load_cmpi")
++ (set_attr "cost" "8")
++HERE
++
++ if ($lmode eq "SI" && $ccmode eq "CC" && $cmp_size_char eq "d") {
++ # prefixed_load_p needs the sign_extend attribute to validate lwa as a
++ # DS-form instruction instead of D-form.
++ print " (set_attr \"sign_extend\" \"yes\")\n";
++ }
++
++ print <<HERE
++ (set_attr "length" "8")])
++
++HERE
++}
++
+ sub gen_ld_cmpi_p10
+ {
+- my ($lmode, $ldst, $clobbermode, $result, $cmpl, $echr, $constpred,
+- $mempred, $ccmode, $np, $extend, $resultmode);
+- LMODE: foreach $lmode ('DI','SI','HI','QI') {
+- $ldst = mode_to_ldst_char($lmode);
+- $clobbermode = $lmode;
+- # For clobber, we need a SI/DI reg in case we
+- # split because we have to sign/zero extend.
+- if ($lmode eq 'HI' || $lmode eq 'QI') { $clobbermode = "GPR"; }
+- RESULT: foreach $result ('clobber', $lmode, "EXT".$lmode) {
+- # EXTDI does not exist, and we cannot directly produce HI/QI results.
+- next RESULT if $result eq "EXTDI" || $result eq "HI" || $result eq "QI";
+- # Don't allow EXTQI because that would allow HI result which we can't do.
+- $result = "GPR" if $result eq "EXTQI";
+- CCMODE: foreach $ccmode ('CC','CCUNS') {
+- $np = "NON_PREFIXED_D";
+- $mempred = "non_update_memory_operand";
+- if ( $ccmode eq 'CC' ) {
+- next CCMODE if $lmode eq 'QI';
+- if ( $lmode eq 'DI' || $lmode eq 'SI' ) {
+- # ld and lwa are both DS-FORM.
+- $np = "NON_PREFIXED_DS";
+- $mempred = "ds_form_mem_operand";
+- }
+- $cmpl = "";
+- $echr = "a";
+- $constpred = "const_m1_to_1_operand";
+- } else {
+- if ( $lmode eq 'DI' ) {
+- # ld is DS-form, but lwz is not.
+- $np = "NON_PREFIXED_DS";
+- $mempred = "ds_form_mem_operand";
+- }
+- $cmpl = "l";
+- $echr = "z";
+- $constpred = "const_0_to_1_operand";
+- }
+- if ($lmode eq 'DI') { $echr = ""; }
+- if ($result =~ m/^EXT/ || $result eq 'GPR' || $clobbermode eq 'GPR') {
+- # We always need extension if result > lmode.
+- if ( $ccmode eq 'CC' ) {
+- $extend = "sign";
+- } else {
+- $extend = "zero";
+- }
+- } else {
+- # Result of SI/DI does not need sign extension.
+- $extend = "none";
+- }
+- print ";; load-cmpi fusion pattern generated by gen_ld_cmpi_p10\n";
+- print ";; load mode is $lmode result mode is $result compare mode is $ccmode extend is $extend\n";
+-
+- print "(define_insn_and_split \"*l${ldst}${echr}_cmp${cmpl}di_cr0_${lmode}_${result}_${ccmode}_${extend}\"\n";
+- print " [(set (match_operand:${ccmode} 2 \"cc_reg_operand\" \"=x\")\n";
+- print " (compare:${ccmode} (match_operand:${lmode} 1 \"${mempred}\" \"m\")\n";
+- if ($ccmode eq 'CCUNS') { print " "; }
+- print " (match_operand:${lmode} 3 \"${constpred}\" \"n\")))\n";
+- if ($result eq 'clobber') {
+- print " (clobber (match_scratch:${clobbermode} 0 \"=r\"))]\n";
+- } elsif ($result eq $lmode) {
+- print " (set (match_operand:${result} 0 \"gpc_reg_operand\" \"=r\") (match_dup 1))]\n";
+- } else {
+- print " (set (match_operand:${result} 0 \"gpc_reg_operand\" \"=r\") (${extend}_extend:${result} (match_dup 1)))]\n";
+- }
+- print " \"(TARGET_P10_FUSION)\"\n";
+- print " \"l${ldst}${echr}%X1 %0,%1\\;cmp${cmpl}di %2,%0,%3\"\n";
+- print " \"&& reload_completed\n";
+- print " && (cc_reg_not_cr0_operand (operands[2], CCmode)\n";
+- print " || !address_is_non_pfx_d_or_x (XEXP (operands[1], 0),\n";
+- print " ${lmode}mode, ${np}))\"\n";
+-
+- if ($extend eq "none") {
+- print " [(set (match_dup 0) (match_dup 1))\n";
+- } else {
+- $resultmode = $result;
+- if ( $result eq 'clobber' ) { $resultmode = $clobbermode }
+- print " [(set (match_dup 0) (${extend}_extend:${resultmode} (match_dup 1)))\n";
+- }
+- print " (set (match_dup 2)\n";
+- print " (compare:${ccmode} (match_dup 0) (match_dup 3)))]\n";
+- print " \"\"\n";
+- print " [(set_attr \"type\" \"fused_load_cmpi\")\n";
+- print " (set_attr \"cost\" \"8\")\n";
+- print " (set_attr \"length\" \"8\")])\n";
+- print "\n";
++ foreach my $lmode (qw/DI SI HI QI/) {
++ foreach my $result ("clobber", $lmode, "EXT$lmode") {
++ # EXTDI does not exist, and we cannot directly produce HI/QI results.
++ next if $result =~ /^(QI|HI|EXTDI)$/;
++
++ # Don't allow EXTQI because that would allow HI result which we can't do.
++ $result = "GPR" if $result eq "EXTQI";
++
++ foreach my $ccmode (qw/CC CCUNS/) {
++ # We do not have signed single-byte loads.
++ next if ($lmode eq "QI" and $ccmode eq "CC");
++
++ gen_ld_cmpi_p10_one($lmode, $result, $ccmode);
+ }
+ }
+ }
+--- a/src/gcc/config/rs6000/predicates.md
++++ b/src/gcc/config/rs6000/predicates.md
+@@ -798,6 +798,43 @@ (define_predicate "easy_vector_constant_vsldoi"
+ (and (match_test "easy_altivec_constant (op, mode)")
+ (match_test "vspltis_shifted (op) != 0")))))
+
++;; Return true if this is a vector constant and each byte in
++;; it is the same.
++(define_predicate "const_vector_each_byte_same"
++ (match_code "const_vector")
++{
++ rtx elt;
++ if (!const_vec_duplicate_p (op, &elt))
++ return false;
++
++ machine_mode emode = GET_MODE_INNER (mode);
++ unsigned HOST_WIDE_INT eval;
++ if (CONST_INT_P (elt))
++ eval = INTVAL (elt);
++ else if (CONST_DOUBLE_AS_FLOAT_P (elt))
++ {
++ gcc_assert (emode == SFmode || emode == DFmode);
++ long l[2];
++ real_to_target (l, CONST_DOUBLE_REAL_VALUE (elt), emode);
++ /* real_to_target puts 32-bit pieces in each long. */
++ eval = zext_hwi (l[0], 32);
++ eval |= zext_hwi (l[1], 32) << 32;
++ }
++ else
++ return false;
++
++ unsigned int esize = GET_MODE_SIZE (emode);
++ unsigned char byte0 = eval & 0xff;
++ for (unsigned int i = 1; i < esize; i++)
++ {
++ eval >>= BITS_PER_UNIT;
++ if (byte0 != (eval & 0xff))
++ return false;
++ }
++
++ return true;
++})
++
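
This predicate backs the relaxed sldoi_to_mov pattern in altivec.md: when every byte of the duplicated constant is identical, a vsldoi of that constant with itself cannot change its value, so the pattern degenerates to a plain move (loading the constant into a register first when it is not an easy constant). The identity it relies on can be seen with the AltiVec intrinsics (a sketch; needs -maltivec, and whether this particular call gets folded depends on optimization):

    #include <altivec.h>

    /* Every byte of v is 5, so shifting the double-width (v,v) by any byte
       count reproduces v: the vsldoi is equivalent to a plain move of the
       constant.  */
    vector unsigned char
    splat_then_shift (void)
    {
      vector unsigned char v = vec_splat_u8 (5);
      return vec_sld (v, v, 3);
    }
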
+ ;; Return 1 if operand is a vector int register or is either a vector constant
+ ;; of all 0 bits of a vector constant of all 1 bits.
+ (define_predicate "vector_int_reg_or_same_bit"
+@@ -1088,20 +1125,6 @@ (define_predicate "lwa_operand"
+ return INTVAL (offset) % 4 == 0;
+ })
+
+-;; Return 1 if the operand is a memory operand that has a valid address for
+-;; a DS-form instruction. I.e. the address has to be either just a register,
+-;; or register + const where the two low order bits of const are zero.
+-(define_predicate "ds_form_mem_operand"
+- (match_code "subreg,mem")
+-{
+- if (!any_memory_operand (op, mode))
+- return false;
+-
+- rtx addr = XEXP (op, 0);
+-
+- return address_to_insn_form (addr, mode, NON_PREFIXED_DS) == INSN_FORM_DS;
+-})
+-
+ ;; Return 1 if the operand, used inside a MEM, is a SYMBOL_REF.
+ (define_predicate "symbol_ref_operand"
+ (and (match_code "symbol_ref")
+--- a/src/gcc/config/rs6000/rs6000-builtins.def
++++ b/src/gcc/config/rs6000/rs6000-builtins.def
+@@ -2008,6 +2008,13 @@
+ const vsll __builtin_vsx_xxspltd_2di (vsll, const int<1>);
+ XXSPLTD_V2DI vsx_xxspltd_v2di {}
+
++ const vsq __builtin_pack_vector_int128 (unsigned long long, \
++ unsigned long long);
++ PACK_V1TI packv1ti {}
++
++ const unsigned long __builtin_unpack_vector_int128 (vsq, const int<1>);
++ UNPACK_V1TI unpackv1ti {}
++
+
+ ; Power7 builtins (ISA 2.06).
+ [power7]
+@@ -2029,16 +2036,9 @@
+ const unsigned int __builtin_divweu (unsigned int, unsigned int);
+ DIVWEU diveu_si {}
+
+- const vsq __builtin_pack_vector_int128 (unsigned long long, \
+- unsigned long long);
+- PACK_V1TI packv1ti {}
+-
+ void __builtin_ppc_speculation_barrier ();
+ SPECBARR speculation_barrier {}
+
+- const unsigned long __builtin_unpack_vector_int128 (vsq, const int<1>);
+- UNPACK_V1TI unpackv1ti {}
+-
+
+ ; Power7 builtins requiring 64-bit GPRs (even with 32-bit addressing).
+ [power7-64]
+@@ -2796,6 +2796,19 @@
+ const vsi __builtin_vsx_xxbrw_v4si (vsi);
+ XXBRW_V4SI p9_xxbrw_v4si {}
+
++ const signed int __builtin_vsx_scalar_cmp_exp_qp_eq (_Float128, _Float128);
++ VSCEQPEQ xscmpexpqp_eq_kf {}
++
++ const signed int __builtin_vsx_scalar_cmp_exp_qp_gt (_Float128, _Float128);
++ VSCEQPGT xscmpexpqp_gt_kf {}
++
++ const signed int __builtin_vsx_scalar_cmp_exp_qp_lt (_Float128, _Float128);
++ VSCEQPLT xscmpexpqp_lt_kf {}
++
++ const signed int \
++ __builtin_vsx_scalar_cmp_exp_qp_unordered (_Float128, _Float128);
++ VSCEQPUO xscmpexpqp_unordered_kf {}
++
+
+ ; Miscellaneous P9 functions
+ [power9]
+@@ -2878,19 +2891,6 @@
+ fpmath _Float128 __builtin_mulf128_round_to_odd (_Float128, _Float128);
+ MULF128_ODD mulkf3_odd {}
+
+- const signed int __builtin_vsx_scalar_cmp_exp_qp_eq (_Float128, _Float128);
+- VSCEQPEQ xscmpexpqp_eq_kf {}
+-
+- const signed int __builtin_vsx_scalar_cmp_exp_qp_gt (_Float128, _Float128);
+- VSCEQPGT xscmpexpqp_gt_kf {}
+-
+- const signed int __builtin_vsx_scalar_cmp_exp_qp_lt (_Float128, _Float128);
+- VSCEQPLT xscmpexpqp_lt_kf {}
+-
+- const signed int \
+- __builtin_vsx_scalar_cmp_exp_qp_unordered (_Float128, _Float128);
+- VSCEQPUO xscmpexpqp_unordered_kf {}
+-
+ fpmath _Float128 __builtin_sqrtf128_round_to_odd (_Float128);
+ SQRTF128_ODD sqrtkf2_odd {}
+
+--- a/src/gcc/config/rs6000/rs6000-string.cc
++++ b/src/gcc/config/rs6000/rs6000-string.cc
+@@ -2811,11 +2811,17 @@ expand_block_move (rtx operands[], bool might_overlap)
+ gen_func.mov = gen_vsx_movv2di_64bit;
+ }
+ else if (TARGET_BLOCK_OPS_UNALIGNED_VSX
+- && TARGET_POWER10 && bytes < 16
++ /* Only use lxvl/stxvl on 64bit POWER10. */
++ && TARGET_POWER10
++ && TARGET_64BIT
++ && bytes < 16
+ && orig_bytes > 16
+- && !(bytes == 1 || bytes == 2
+- || bytes == 4 || bytes == 8)
+- && (align >= 128 || !STRICT_ALIGNMENT))
++ && !(bytes == 1
++ || bytes == 2
++ || bytes == 4
++ || bytes == 8)
++ && (align >= 128
++ || !STRICT_ALIGNMENT))
+ {
+ /* Only use lxvl/stxvl if it could replace multiple ordinary
+ loads+stores. Also don't use it unless we likely already
+--- a/src/gcc/config/rs6000/rs6000.cc
++++ b/src/gcc/config/rs6000/rs6000.cc
+@@ -8128,7 +8128,8 @@ darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
+ type = TREE_TYPE (type);
+ } while (AGGREGATE_TYPE_P (type));
+
+- if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
++ if (type != error_mark_node && ! AGGREGATE_TYPE_P (type)
++ && ! TYPE_PACKED (type) && maximum_field_alignment == 0)
+ align = MAX (align, TYPE_ALIGN (type));
+
+ return align;
+@@ -17116,7 +17117,7 @@ output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
+ else
+- REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
++ real_to_target (k, CONST_DOUBLE_REAL_VALUE (x), GET_MODE (x));
+
+ if (TARGET_64BIT)
+ {
+@@ -21906,7 +21907,9 @@ rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
+ *total = rs6000_cost->divsi;
+ }
+ /* Add in shift and subtract for MOD unless we have a mod instruction. */
+- if (!TARGET_MODULO && (code == MOD || code == UMOD))
++ if ((!TARGET_MODULO
++ || (RS6000_DISABLE_SCALAR_MODULO && SCALAR_INT_MODE_P (mode)))
++ && (code == MOD || code == UMOD))
+ *total += COSTS_N_INSNS (2);
+ return false;
+
+@@ -28603,7 +28606,6 @@ vec_const_128bit_to_bytes (rtx op,
+
+ info->all_words_same
+ = (info->words[0] == info->words[1]
+- && info->words[0] == info->words[1]
+ && info->words[0] == info->words[2]
+ && info->words[0] == info->words[3]);
+
+--- a/src/gcc/config/rs6000/rs6000.h
++++ b/src/gcc/config/rs6000/rs6000.h
+@@ -2632,3 +2632,9 @@ while (0)
+ rs6000_asm_output_opcode (STREAM); \
+ } \
+ while (0)
++
++/* Disable generation of scalar modulo instructions due to performance issues
++ with certain input values. This can be removed in the future when the
++ issues have been resolved. */
++#define RS6000_DISABLE_SCALAR_MODULO 1
++
+--- a/src/gcc/config/rs6000/rs6000.md
++++ b/src/gcc/config/rs6000/rs6000.md
+@@ -287,7 +287,7 @@ (define_attr "cannot_copy" "no,yes" (const_string "no"))
+ ;; Whether this insn has a prefixed form and a non-prefixed form.
+ (define_attr "maybe_prefixed" "no,yes"
+ (if_then_else (eq_attr "type" "load,fpload,vecload,store,fpstore,vecstore,
+- integer,add")
++ integer,add,fused_load_cmpi")
+ (const_string "yes")
+ (const_string "no")))
+
+@@ -302,7 +302,7 @@ (define_attr "prefixed" "no,yes"
+ (eq_attr "maybe_prefixed" "no"))
+ (const_string "no")
+
+- (eq_attr "type" "load,fpload,vecload")
++ (eq_attr "type" "load,fpload,vecload,fused_load_cmpi")
+ (if_then_else (match_test "prefixed_load_p (insn)")
+ (const_string "yes")
+ (const_string "no"))
+@@ -3359,6 +3359,17 @@ (define_expand "mod<mode>3"
+ FAIL;
+
+ operands[2] = force_reg (<MODE>mode, operands[2]);
++
++ if (RS6000_DISABLE_SCALAR_MODULO)
++ {
++ temp1 = gen_reg_rtx (<MODE>mode);
++ temp2 = gen_reg_rtx (<MODE>mode);
++
++ emit_insn (gen_div<mode>3 (temp1, operands[1], operands[2]));
++ emit_insn (gen_mul<mode>3 (temp2, temp1, operands[2]));
++ emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
++ DONE;
++ }
+ }
+ else
+ {
+@@ -3378,17 +3389,36 @@ (define_insn "*mod<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (mod:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+- "TARGET_MODULO"
++ "TARGET_MODULO && !RS6000_DISABLE_SCALAR_MODULO"
+ "mods<wd> %0,%1,%2"
+ [(set_attr "type" "div")
+ (set_attr "size" "<bits>")])
+
++;; This define_expand can be removed when RS6000_DISABLE_SCALAR_MODULO is
++;; removed.
++(define_expand "umod<mode>3"
++ [(set (match_operand:GPR 0 "gpc_reg_operand")
++ (umod:GPR (match_operand:GPR 1 "gpc_reg_operand")
++ (match_operand:GPR 2 "gpc_reg_operand")))]
++ "TARGET_MODULO"
++{
++ if (RS6000_DISABLE_SCALAR_MODULO)
++ {
++ rtx temp1 = gen_reg_rtx (<MODE>mode);
++ rtx temp2 = gen_reg_rtx (<MODE>mode);
++
++ emit_insn (gen_udiv<mode>3 (temp1, operands[1], operands[2]));
++ emit_insn (gen_mul<mode>3 (temp2, temp1, operands[2]));
++ emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
++ DONE;
++ }
++})
+
+-(define_insn "umod<mode>3"
++(define_insn "*umod<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (umod:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+- "TARGET_MODULO"
++ "TARGET_MODULO && !RS6000_DISABLE_SCALAR_MODULO"
+ "modu<wd> %0,%1,%2"
+ [(set_attr "type" "div")
+ (set_attr "size" "<bits>")])
+@@ -3445,7 +3475,7 @@ (define_insn "umodti3"
+ [(set (match_operand:TI 0 "altivec_register_operand" "=v")
+ (umod:TI (match_operand:TI 1 "altivec_register_operand" "v")
+ (match_operand:TI 2 "altivec_register_operand" "v")))]
+- "TARGET_POWER10 && TARGET_POWERPC64"
++ "TARGET_POWER10 && TARGET_POWERPC64 && !RS6000_DISABLE_SCALAR_MODULO"
+ "vmoduq %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "size" "128")])
+@@ -3454,7 +3484,7 @@ (define_insn "modti3"
+ [(set (match_operand:TI 0 "altivec_register_operand" "=v")
+ (mod:TI (match_operand:TI 1 "altivec_register_operand" "v")
+ (match_operand:TI 2 "altivec_register_operand" "v")))]
+- "TARGET_POWER10 && TARGET_POWERPC64"
++ "TARGET_POWER10 && TARGET_POWERPC64 && !RS6000_DISABLE_SCALAR_MODULO"
+ "vmodsq %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "size" "128")])
+--- a/src/gcc/config/rs6000/vsx.md
++++ b/src/gcc/config/rs6000/vsx.md
+@@ -2018,22 +2018,20 @@ (define_insn "*vsx_tsqrt<mode>2_internal"
+ "x<VSv>tsqrt<sd>p %0,%x1"
+ [(set_attr "type" "<VStype_simple>")])
+
+-;; Fused vector multiply/add instructions. Support the classical Altivec
+-;; versions of fma, which allows the target to be a separate register from the
+-;; 3 inputs. Under VSX, the target must be either the addend or the first
+-;; multiply.
+-
++;; Fused vector multiply/add instructions. Do not generate the Altivec versions
++;; of fma (vmaddfp and vnmsubfp). These instructions allow the target to be a
++;; separate register from the 3 inputs, but they have different rounding
++;; behaviors than the VSX instructions.
+ (define_insn "*vsx_fmav4sf4"
+- [(set (match_operand:V4SF 0 "vsx_register_operand" "=wa,wa,v")
++ [(set (match_operand:V4SF 0 "vsx_register_operand" "=wa,wa")
+ (fma:V4SF
+- (match_operand:V4SF 1 "vsx_register_operand" "%wa,wa,v")
+- (match_operand:V4SF 2 "vsx_register_operand" "wa,0,v")
+- (match_operand:V4SF 3 "vsx_register_operand" "0,wa,v")))]
++ (match_operand:V4SF 1 "vsx_register_operand" "%wa,wa")
++ (match_operand:V4SF 2 "vsx_register_operand" "wa,0")
++ (match_operand:V4SF 3 "vsx_register_operand" "0,wa")))]
+ "VECTOR_UNIT_VSX_P (V4SFmode)"
+ "@
+ xvmaddasp %x0,%x1,%x2
+- xvmaddmsp %x0,%x1,%x3
+- vmaddfp %0,%1,%2,%3"
++ xvmaddmsp %x0,%x1,%x3"
+ [(set_attr "type" "vecfloat")])
+
+ (define_insn "*vsx_fmav2df4"
+@@ -2075,18 +2073,17 @@ (define_insn "*vsx_nfma<mode>4"
+ [(set_attr "type" "<VStype_mul>")])
+
+ (define_insn "*vsx_nfmsv4sf4"
+- [(set (match_operand:V4SF 0 "vsx_register_operand" "=wa,wa,v")
++ [(set (match_operand:V4SF 0 "vsx_register_operand" "=wa,wa")
+ (neg:V4SF
+ (fma:V4SF
+- (match_operand:V4SF 1 "vsx_register_operand" "%wa,wa,v")
+- (match_operand:V4SF 2 "vsx_register_operand" "wa,0,v")
++ (match_operand:V4SF 1 "vsx_register_operand" "%wa,wa")
++ (match_operand:V4SF 2 "vsx_register_operand" "wa,0")
+ (neg:V4SF
+- (match_operand:V4SF 3 "vsx_register_operand" "0,wa,v")))))]
++ (match_operand:V4SF 3 "vsx_register_operand" "0,wa")))))]
+ "VECTOR_UNIT_VSX_P (V4SFmode)"
+ "@
+ xvnmsubasp %x0,%x1,%x2
+- xvnmsubmsp %x0,%x1,%x3
+- vnmsubfp %0,%1,%2,%3"
++ xvnmsubmsp %x0,%x1,%x3"
+ [(set_attr "type" "vecfloat")])
+
+ (define_insn "*vsx_nfmsv2df4"
+@@ -6560,7 +6557,7 @@ (define_insn "xxeval"
+ (match_operand:QI 4 "u8bit_cint_operand" "n")]
+ UNSPEC_XXEVAL))]
+ "TARGET_POWER10"
+- "xxeval %0,%1,%2,%3,%4"
++ "xxeval %x0,%x1,%x2,%x3,%4"
+ [(set_attr "type" "vecperm")
+ (set_attr "prefixed" "yes")])
+
+--- a/src/gcc/config/sh/sh.md
++++ b/src/gcc/config/sh/sh.md
+@@ -10680,6 +10680,45 @@ (define_peephole2
+ && peep2_reg_dead_p (2, operands[1]) && peep2_reg_dead_p (3, operands[0])"
+ [(const_int 0)]
+ {
++ if (MEM_P (operands[3]) && reg_overlap_mentioned_p (operands[0], operands[3]))
++ {
++ // Take care when the eliminated operand[0] register is part of
++ // the destination memory address.
++ rtx addr = XEXP (operands[3], 0);
++
++ if (REG_P (addr))
++ operands[3] = replace_equiv_address (operands[3], operands[1]);
++
++ else if (GET_CODE (addr) == PLUS && REG_P (XEXP (addr, 0))
++ && CONST_INT_P (XEXP (addr, 1))
++ && REGNO (operands[0]) == REGNO (XEXP (addr, 0)))
++ operands[3] = replace_equiv_address (operands[3],
++ gen_rtx_PLUS (SImode, operands[1], XEXP (addr, 1)));
++
++ else if (GET_CODE (addr) == PLUS && REG_P (XEXP (addr, 0))
++ && REG_P (XEXP (addr, 1)))
++ {
++ // register + register address @(R0, Rn)
++ // can change only the Rn in the address, not R0.
++ if (REGNO (operands[0]) == REGNO (XEXP (addr, 0))
++ && REGNO (XEXP (addr, 0)) != 0)
++ {
++ operands[3] = replace_equiv_address (operands[3],
++ gen_rtx_PLUS (SImode, operands[1], XEXP (addr, 1)));
++ }
++ else if (REGNO (operands[0]) == REGNO (XEXP (addr, 1))
++ && REGNO (XEXP (addr, 1)) != 0)
++ {
++ operands[3] = replace_equiv_address (operands[3],
++ gen_rtx_PLUS (SImode, XEXP (addr, 0), operands[1]));
++ }
++ else
++ FAIL;
++ }
++ else
++ FAIL;
++ }
++
+ emit_insn (gen_addsi3 (operands[1], operands[1], operands[2]));
+ sh_peephole_emit_move_insn (operands[3], operands[1]);
+ })
+--- a/src/gcc/cp/ChangeLog
++++ b/src/gcc/cp/ChangeLog
+@@ -1,3 +1,88 @@
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/106310
++ * parser.cc (cp_parser_template_name): Skip non-member
++ lookup after the template keyword.
++ (cp_parser_lookup_name): Pass down template_keyword_p.
++
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/106890
++ PR c++/109666
++ * name-lookup.cc (maybe_push_to_top_level)
++ (maybe_pop_from_top_level): Split out...
++ * pt.cc (instantiate_body): ...from here.
++ * init.cc (maybe_instantiate_nsdmi_init): Use them.
++ * name-lookup.h: Declare them..
++
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/108099
++ * decl.cc (grokdeclarator): Don't clear typedef_decl after 'unsigned
++ typedef' pedwarn. Use c_common_signed_or_unsigned_type. Also
++ handle 'signed typedef'.
++
++2023-08-07 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-05-09 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/109761
++ * parser.cc (cp_parser_class_specifier): Don't pass a class
++ context to noexcept_override_late_checks.
++ (noexcept_override_late_checks): Remove 'type' parameter
++ and use DECL_CONTEXT of 'fndecl' instead.
++
++2023-07-12 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-06-29 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/110468
++ * init.cc (maybe_instantiate_nsdmi_init): Mask out all
++ tsubst flags except for tf_warning_or_error.
++
++2023-05-17 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-17 Jakub Jelinek <jakub@redhat.com>
++
++ PR c++/109868
++ * init.cc (build_zero_init_1): Don't initialize zero-width bitfields.
++ For unions only initialize the first FIELD_DECL.
++
++2023-05-15 Jason Merrill <jason@redhat.com>
++
++ PR c++/109241
++ * pt.cc (find_parameter_packs_r): Handle null TREE_BINFO.
++
++2023-05-09 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-04-01 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/109160
++ * cp-tree.h (do_auto_deduction): Add defaulted tmpl parameter.
++ * pt.cc (convert_template_argument): Pass 'in_decl' as 'tmpl' to
++ do_auto_deduction.
++ (tsubst_decl) <case VAR_/TYPE_DECL>: Pass 'tmpl' instead of 't' as
++ 'in_decl' to coerce_template_parms.
++ (unify) <case TEMPLATE_PARM_INDEX>: Pass TPARMS_PRIMARY_TEMPLATE
++ as 'tmpl' to do_auto_deduction.
++ (do_auto_deduction): Document default arguments. Rename local
++ variable 'tmpl' to 'ctmpl'. Use 'tmpl' to obtain a full set of
++ template arguments for satisfaction in the adc_unify case.
++
++2023-05-09 Jason Merrill <jason@redhat.com>
++
++ PR c++/106740
++ PR c++/105852
++ * decl.cc (duplicate_decls): Change non-templated friend
++ check to an assert.
++ * pt.cc (tsubst_function_decl): Don't set DECL_TEMPLATE_INFO
++ on non-templated friends.
++ (tsubst_friend_function): Adjust.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/cp/cp-tree.h
++++ b/src/gcc/cp/cp-tree.h
+@@ -7295,7 +7295,8 @@ extern tree do_auto_deduction (tree, tree, tree,
+ auto_deduction_context
+ = adc_unspecified,
+ tree = NULL_TREE,
+- int = LOOKUP_NORMAL);
++ int = LOOKUP_NORMAL,
++ tree = NULL_TREE);
+ extern tree type_uses_auto (tree);
+ extern tree type_uses_auto_or_concept (tree);
+ extern void append_type_to_template_for_access_check (tree, tree, tree,
+--- a/src/gcc/cp/decl.cc
++++ b/src/gcc/cp/decl.cc
+@@ -2666,10 +2666,11 @@ duplicate_decls (tree newdecl, tree olddecl, bool hiding, bool was_hidden)
+ = TINFO_USED_TEMPLATE_ID (new_template_info);
+ }
+
+- if (non_templated_friend_p (olddecl))
+- /* Don't copy tinfo from a non-templated friend (PR105761). */;
+- else
+- DECL_TEMPLATE_INFO (newdecl) = DECL_TEMPLATE_INFO (olddecl);
++ /* We don't want to copy template info from a non-templated friend
++ (PR105761), but these shouldn't have DECL_TEMPLATE_INFO now. */
++ gcc_checking_assert (!DECL_TEMPLATE_INFO (olddecl)
++ || !non_templated_friend_p (olddecl));
++ DECL_TEMPLATE_INFO (newdecl) = DECL_TEMPLATE_INFO (olddecl);
+ }
+
+ if (DECL_DECLARES_FUNCTION_P (newdecl))
+@@ -12300,11 +12301,14 @@ grokdeclarator (const cp_declarator *declarator,
+ {
+ if (typedef_decl)
+ {
+- pedwarn (loc, OPT_Wpedantic, "%qs specified with %qT",
+- key, type);
++ pedwarn (loc, OPT_Wpedantic,
++ "%qs specified with typedef-name %qD",
++ key, typedef_decl);
+ ok = !flag_pedantic_errors;
+- type = DECL_ORIGINAL_TYPE (typedef_decl);
+- typedef_decl = NULL_TREE;
++ /* PR108099: __int128_t comes from c_common_nodes_and_builtins,
++ and is not built as a typedef. */
++ if (is_typedef_decl (typedef_decl))
++ type = DECL_ORIGINAL_TYPE (typedef_decl);
+ }
+ else if (declspecs->decltype_p)
+ error_at (loc, "%qs specified with %<decltype%>", key);
+@@ -12357,7 +12361,7 @@ grokdeclarator (const cp_declarator *declarator,
+ else if (type == char_type_node)
+ type = unsigned_char_type_node;
+ else if (typedef_decl)
+- type = unsigned_type_for (type);
++ type = c_common_unsigned_type (type);
+ else
+ type = unsigned_type_node;
+ }
+@@ -12371,6 +12375,8 @@ grokdeclarator (const cp_declarator *declarator,
+ type = long_integer_type_node;
+ else if (short_p)
+ type = short_integer_type_node;
++ else if (signed_p && typedef_decl)
++ type = c_common_signed_type (type);
+
+ if (decl_spec_seq_has_spec_p (declspecs, ds_complex))
+ {
+--- a/src/gcc/cp/init.cc
++++ b/src/gcc/cp/init.cc
+@@ -189,15 +189,21 @@ build_zero_init_1 (tree type, tree nelts, bool static_storage_p,
+ init = build_zero_cst (type);
+ else if (RECORD_OR_UNION_CODE_P (TREE_CODE (type)))
+ {
+- tree field;
++ tree field, next;
+ vec<constructor_elt, va_gc> *v = NULL;
+
+ /* Iterate over the fields, building initializations. */
+- for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++ for (field = TYPE_FIELDS (type); field; field = next)
+ {
++ next = DECL_CHAIN (field);
++
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
++ /* For unions, only the first field is initialized. */
++ if (TREE_CODE (type) == UNION_TYPE)
++ next = NULL_TREE;
++
+ if (TREE_TYPE (field) == error_mark_node)
+ continue;
+
+@@ -212,6 +218,11 @@ build_zero_init_1 (tree type, tree nelts, bool static_storage_p,
+ continue;
+ }
+
++ /* Don't add zero width bitfields. */
++ if (DECL_C_BIT_FIELD (field)
++ && integer_zerop (DECL_SIZE (field)))
++ continue;
++
+ /* Note that for class types there will be FIELD_DECLs
+ corresponding to base classes as well. Thus, iterating
+ over TYPE_FIELDs will result in correct initialization of
+@@ -230,10 +241,6 @@ build_zero_init_1 (tree type, tree nelts, bool static_storage_p,
+ if (value)
+ CONSTRUCTOR_APPEND_ELT(v, field, value);
+ }
+-
+- /* For unions, only the first field is initialized. */
+- if (TREE_CODE (type) == UNION_TYPE)
+- break;
+ }
+
+ /* Build a constructor to contain the initializations. */
+@@ -572,6 +579,10 @@ maybe_instantiate_nsdmi_init (tree member, tsubst_flags_t complain)
+ tree init = DECL_INITIAL (member);
+ if (init && DECL_LANG_SPECIFIC (member) && DECL_TEMPLATE_INFO (member))
+ {
++ /* Clear any special tsubst flags; the result of NSDMI instantiation
++ should be independent of the substitution context. */
++ complain &= tf_warning_or_error;
++
+ init = DECL_INITIAL (DECL_TI_TEMPLATE (member));
+ location_t expr_loc
+ = cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (member));
+@@ -599,15 +610,9 @@ maybe_instantiate_nsdmi_init (tree member, tsubst_flags_t complain)
+ bool pushed = false;
+ tree ctx = DECL_CONTEXT (member);
+
+- processing_template_decl_sentinel ptds (/*reset*/false);
++ bool push_to_top = maybe_push_to_top_level (member);
+ if (!currently_open_class (ctx))
+ {
+- if (!LOCAL_CLASS_P (ctx))
+- push_to_top_level ();
+- else
+- /* push_to_top_level would lose the necessary function context,
+- just reset processing_template_decl. */
+- processing_template_decl = 0;
+ push_nested_class (ctx);
+ push_deferring_access_checks (dk_no_deferred);
+ pushed = true;
+@@ -635,9 +640,8 @@ maybe_instantiate_nsdmi_init (tree member, tsubst_flags_t complain)
+ {
+ pop_deferring_access_checks ();
+ pop_nested_class ();
+- if (!LOCAL_CLASS_P (ctx))
+- pop_from_top_level ();
+ }
++ maybe_pop_from_top_level (push_to_top);
+
+ input_location = sloc;
+ }
+--- a/src/gcc/cp/name-lookup.cc
++++ b/src/gcc/cp/name-lookup.cc
+@@ -8451,6 +8451,43 @@ pop_from_top_level (void)
+ free_saved_scope = s;
+ }
+
++/* Like push_to_top_level, but not if D is function-local. Returns whether we
++ did push to top. */
++
++bool
++maybe_push_to_top_level (tree d)
++{
++ /* Push if D isn't function-local, or is a lambda function, for which name
++ resolution is already done. */
++ bool push_to_top
++ = !(current_function_decl
++ && !LAMBDA_FUNCTION_P (d)
++ && decl_function_context (d) == current_function_decl);
++
++ if (push_to_top)
++ push_to_top_level ();
++ else
++ {
++ gcc_assert (!processing_template_decl);
++ push_function_context ();
++ cp_unevaluated_operand = 0;
++ c_inhibit_evaluation_warnings = 0;
++ }
++
++ return push_to_top;
++}
++
++/* Return from whatever maybe_push_to_top_level did. */
++
++void
++maybe_pop_from_top_level (bool push_to_top)
++{
++ if (push_to_top)
++ pop_from_top_level ();
++ else
++ pop_function_context ();
++}
++
+ /* Push into the scope of the namespace NS, even if it is deeply
+ nested within another namespace. */
+
+--- a/src/gcc/cp/name-lookup.h
++++ b/src/gcc/cp/name-lookup.h
+@@ -468,6 +468,8 @@ extern void push_nested_namespace (tree);
+ extern void pop_nested_namespace (tree);
+ extern void push_to_top_level (void);
+ extern void pop_from_top_level (void);
++extern bool maybe_push_to_top_level (tree);
++extern void maybe_pop_from_top_level (bool);
+ extern void push_using_decl_bindings (tree, tree);
+
+ /* Lower level interface for modules. */
+--- a/src/gcc/cp/parser.cc
++++ b/src/gcc/cp/parser.cc
+@@ -249,7 +249,7 @@ static cp_token_cache *cp_token_cache_new
+ static tree cp_parser_late_noexcept_specifier
+ (cp_parser *, tree);
+ static void noexcept_override_late_checks
+- (tree, tree);
++ (tree);
+
+ static void cp_parser_initial_pragma
+ (cp_token *);
+@@ -2660,7 +2660,7 @@ static tree cp_parser_objc_struct_declaration
+ /* Utility Routines */
+
+ static cp_expr cp_parser_lookup_name
+- (cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t);
++ (cp_parser *, tree, enum tag_types, int, bool, bool, tree *, location_t);
+ static tree cp_parser_lookup_name_simple
+ (cp_parser *, tree, location_t);
+ static tree cp_parser_maybe_treat_template_as_class
+@@ -18590,7 +18590,7 @@ cp_parser_template_name (cp_parser* parser,
+ /* Look up the name. */
+ decl = cp_parser_lookup_name (parser, identifier,
+ tag_type,
+- /*is_template=*/true,
++ /*is_template=*/1 + template_keyword_p,
+ /*is_namespace=*/false,
+ check_dependency_p,
+ /*ambiguous_decls=*/NULL,
+@@ -26151,7 +26151,7 @@ cp_parser_class_specifier_1 (cp_parser* parser)
+ /* The finish_struct call above performed various override checking,
+ but it skipped unparsed noexcept-specifier operands. Now that we
+ have resolved them, check again. */
+- noexcept_override_late_checks (type, decl);
++ noexcept_override_late_checks (decl);
+
+ /* Remove any member-function parameters from the symbol table. */
+ pop_injected_parms ();
+@@ -27876,14 +27876,13 @@ cp_parser_late_noexcept_specifier (cp_parser *parser, tree default_arg)
+ }
+
+ /* Perform late checking of overriding function with respect to their
+- noexcept-specifiers. TYPE is the class and FNDECL is the function
+- that potentially overrides some virtual function with the same
+- signature. */
++ noexcept-specifiers. FNDECL is the member function that potentially
++ overrides some virtual function with the same signature. */
+
+ static void
+-noexcept_override_late_checks (tree type, tree fndecl)
++noexcept_override_late_checks (tree fndecl)
+ {
+- tree binfo = TYPE_BINFO (type);
++ tree binfo = TYPE_BINFO (DECL_CONTEXT (fndecl));
+ tree base_binfo;
+
+ if (DECL_STATIC_FUNCTION_P (fndecl))
+@@ -30389,7 +30388,7 @@ prefer_type_arg (tag_types tag_type)
+ refer to types are ignored.
+
+ If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
+- ignored.
++ ignored. If IS_TEMPLATE is 2, the 'template' keyword was specified.
+
+ If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
+ are ignored.
+@@ -30404,7 +30403,7 @@ prefer_type_arg (tag_types tag_type)
+ static cp_expr
+ cp_parser_lookup_name (cp_parser *parser, tree name,
+ enum tag_types tag_type,
+- bool is_template,
++ int is_template,
+ bool is_namespace,
+ bool check_dependency,
+ tree *ambiguous_decls,
+@@ -30589,7 +30588,14 @@ cp_parser_lookup_name (cp_parser *parser, tree name,
+ else
+ decl = NULL_TREE;
+
+- if (!decl)
++ /* If we didn't find a member and have dependent bases, the member lookup
++ is now dependent. */
++ if (!dep && !decl && any_dependent_bases_p (object_type))
++ dep = true;
++
++ if (dep && is_template == 2)
++ /* The template keyword specifies a dependent template. */;
++ else if (!decl)
+ /* Look it up in the enclosing context. DR 141: When looking for a
+ template-name after -> or ., only consider class templates. */
+ decl = lookup_name (name, is_namespace ? LOOK_want::NAMESPACE
+@@ -30602,8 +30608,7 @@ cp_parser_lookup_name (cp_parser *parser, tree name,
+
+ /* If we know we're looking for a type (e.g. A in p->A::x),
+ mock up a typename. */
+- if (!decl && object_type && tag_type != none_type
+- && dependentish_scope_p (object_type))
++ if (!decl && dep && tag_type != none_type)
+ {
+ tree type = build_typename_type (object_type, name, name,
+ typename_type);
+--- a/src/gcc/cp/pt.cc
++++ b/src/gcc/cp/pt.cc
+@@ -4106,10 +4106,14 @@ find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data)
+ case TAG_DEFN:
+ t = TREE_TYPE (t);
+ if (CLASS_TYPE_P (t))
+- /* Local class, need to look through the whole definition. */
+- for (tree bb : BINFO_BASE_BINFOS (TYPE_BINFO (t)))
+- cp_walk_tree (&BINFO_TYPE (bb), &find_parameter_packs_r,
+- ppd, ppd->visited);
++ {
++ /* Local class, need to look through the whole definition.
++ TYPE_BINFO might be unset for a partial instantiation. */
++ if (TYPE_BINFO (t))
++ for (tree bb : BINFO_BASE_BINFOS (TYPE_BINFO (t)))
++ cp_walk_tree (&BINFO_TYPE (bb), &find_parameter_packs_r,
++ ppd, ppd->visited);
++ }
+ else
+ /* Enum, look at the values. */
+ for (tree l = TYPE_VALUES (t); l; l = TREE_CHAIN (l))
+@@ -8636,7 +8640,7 @@ convert_template_argument (tree parm,
+ else if (tree a = type_uses_auto (t))
+ {
+ t = do_auto_deduction (t, arg, a, complain, adc_unify, args,
+- LOOKUP_IMPLICIT);
++ LOOKUP_IMPLICIT, /*tmpl=*/in_decl);
+ if (t == error_mark_node)
+ return error_mark_node;
+ }
+@@ -11339,9 +11343,10 @@ tsubst_friend_function (tree decl, tree args)
+ tree new_friend_template_info = DECL_TEMPLATE_INFO (new_friend);
+ tree new_friend_result_template_info = NULL_TREE;
+ bool new_friend_is_defn =
+- (DECL_INITIAL (DECL_TEMPLATE_RESULT
+- (template_for_substitution (new_friend)))
+- != NULL_TREE);
++ (new_friend_template_info
++ && (DECL_INITIAL (DECL_TEMPLATE_RESULT
++ (template_for_substitution (new_friend)))
++ != NULL_TREE));
+ tree not_tmpl = new_friend;
+
+ if (TREE_CODE (new_friend) == TEMPLATE_DECL)
+@@ -14175,6 +14180,10 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
+ && !LAMBDA_FUNCTION_P (t))
+ return t;
+
++ /* A non-templated friend doesn't get DECL_TEMPLATE_INFO. */
++ if (non_templated_friend_p (t))
++ goto friend_case;
++
+ /* Calculate the most general template of which R is a
+ specialization. */
+ gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t));
+@@ -14220,6 +14229,7 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
+ tsubst_friend_function, and we want only to create a
+ new decl (R) with appropriate types so that we can call
+ determine_specialization. */
++ friend_case:
+ gen_tmpl = NULL_TREE;
+ argvec = NULL_TREE;
+ }
+@@ -14415,7 +14425,7 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
+ /* If this is an instantiation of a member template, clone it.
+ If it isn't, that'll be handled by
+ clone_constructors_and_destructors. */
+- if (PRIMARY_TEMPLATE_P (gen_tmpl))
++ if (gen_tmpl && PRIMARY_TEMPLATE_P (gen_tmpl))
+ clone_cdtor (r, /*update_methods=*/false);
+ }
+ else if ((complain & tf_error) != 0
+@@ -15022,7 +15032,7 @@ tsubst_decl (tree t, tree args, tsubst_flags_t complain)
+ if (argvec != error_mark_node)
+ argvec = (coerce_innermost_template_parms
+ (DECL_TEMPLATE_PARMS (gen_tmpl),
+- argvec, t, complain,
++ argvec, tmpl, complain,
+ /*all*/true, /*defarg*/true));
+ if (argvec == error_mark_node)
+ RETURN (error_mark_node);
+@@ -24449,7 +24459,9 @@ unify (tree tparms, tree targs, tree parm, tree arg, int strict,
+ if (tree a = type_uses_auto (tparm))
+ {
+ tparm = do_auto_deduction (tparm, arg, a,
+- complain, adc_unify, targs);
++ complain, adc_unify, targs,
++ LOOKUP_NORMAL,
++ TPARMS_PRIMARY_TEMPLATE (tparms));
+ if (tparm == error_mark_node)
+ return 1;
+ }
+@@ -26493,20 +26505,7 @@ instantiate_body (tree pattern, tree args, tree d, bool nested_p)
+ if (current_function_decl)
+ save_omp_privatization_clauses (omp_privatization_save);
+
+- bool push_to_top
+- = !(current_function_decl
+- && !LAMBDA_FUNCTION_P (d)
+- && decl_function_context (d) == current_function_decl);
+-
+- if (push_to_top)
+- push_to_top_level ();
+- else
+- {
+- gcc_assert (!processing_template_decl);
+- push_function_context ();
+- cp_unevaluated_operand = 0;
+- c_inhibit_evaluation_warnings = 0;
+- }
++ bool push_to_top = maybe_push_to_top_level (d);
+
+ if (VAR_P (d))
+ {
+@@ -26619,10 +26618,7 @@ instantiate_body (tree pattern, tree args, tree d, bool nested_p)
+ if (!nested_p)
+ TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0;
+
+- if (push_to_top)
+- pop_from_top_level ();
+- else
+- pop_function_context ();
++ maybe_pop_from_top_level (push_to_top);
+
+ if (current_function_decl)
+ restore_omp_privatization_clauses (omp_privatization_save);
+@@ -30334,13 +30330,20 @@ do_class_deduction (tree ptype, tree tmpl, tree init,
+ adc_requirement contexts to communicate the necessary template arguments
+ to satisfaction. OUTER_TARGS is ignored in other contexts.
+
+- For partial-concept-ids, extra args may be appended to the list of deduced
+- template arguments prior to determining constraint satisfaction. */
++ Additionally for adc_unify contexts TMPL is the template for which TYPE
++ is a template parameter type.
++
++ For partial-concept-ids, extra args from OUTER_TARGS, TMPL and the current
++ scope may be appended to the list of deduced template arguments prior to
++ determining constraint satisfaction as appropriate. */
+
+ tree
+ do_auto_deduction (tree type, tree init, tree auto_node,
+- tsubst_flags_t complain, auto_deduction_context context,
+- tree outer_targs, int flags)
++ tsubst_flags_t complain /* = tf_warning_or_error */,
++ auto_deduction_context context /* = adc_unspecified */,
++ tree outer_targs /* = NULL_TREE */,
++ int flags /* = LOOKUP_NORMAL */,
++ tree tmpl /* = NULL_TREE */)
+ {
+ if (init == error_mark_node)
+ return error_mark_node;
+@@ -30359,9 +30362,9 @@ do_auto_deduction (tree type, tree init, tree auto_node,
+ auto_node. */
+ complain &= ~tf_partial;
+
+- if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node))
++ if (tree ctmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node))
+ /* C++17 class template argument deduction. */
+- return do_class_deduction (type, tmpl, init, flags, complain);
++ return do_class_deduction (type, ctmpl, init, flags, complain);
+
+ if (init == NULL_TREE || TREE_TYPE (init) == NULL_TREE)
+ /* Nothing we can do with this, even in deduction context. */
+@@ -30521,7 +30524,10 @@ do_auto_deduction (tree type, tree init, tree auto_node,
+ }
+ }
+
+- tree full_targs = add_to_template_args (outer_targs, targs);
++ tree full_targs = outer_targs;
++ if (context == adc_unify && tmpl)
++ full_targs = add_outermost_template_args (tmpl, full_targs);
++ full_targs = add_to_template_args (full_targs, targs);
+
+ /* HACK: Compensate for callers not always communicating all levels of
+ outer template arguments by filling in the outermost missing levels
+--- a/src/gcc/cprop.cc
++++ b/src/gcc/cprop.cc
+@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
+ #include "coretypes.h"
+ #include "backend.h"
+ #include "rtl.h"
++#include "rtlanal.h"
+ #include "cfghooks.h"
+ #include "df.h"
+ #include "insn-config.h"
+@@ -795,7 +796,8 @@ try_replace_reg (rtx from, rtx to, rtx_insn *insn)
+ /* If we've failed perform the replacement, have a single SET to
+ a REG destination and don't yet have a note, add a REG_EQUAL note
+ to not lose information. */
+- if (!success && note == 0 && set != 0 && REG_P (SET_DEST (set)))
++ if (!success && note == 0 && set != 0 && REG_P (SET_DEST (set))
++ && !contains_paradoxical_subreg_p (SET_SRC (set)))
+ note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
+ }
+
+--- a/src/gcc/d/ChangeLog
++++ b/src/gcc/d/ChangeLog
+@@ -1,3 +1,81 @@
++2023-08-15 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110959
++ * dmd/canthrow.d (Dsymbol_canThrow): Use foreachVar.
++ * dmd/declaration.d (TupleDeclaration::needThis): Likewise.
++ (TupleDeclaration::foreachVar): New function.
++ (VarDeclaration::setFieldOffset): Use foreachVar.
++ * dmd/dinterpret.d (Interpreter::visit (DeclarationExp)): Likewise.
++ * dmd/dsymbolsem.d (DsymbolSemanticVisitor::visit (VarDeclaration)):
++ Don't push tuple field members to the scope symbol table.
++ (determineFields): Handle pushing tuple field members here instead.
++ * dmd/dtoh.d (ToCppBuffer::visit (VarDeclaration)): Visit all tuple
++ fields.
++ (ToCppBuffer::visit (TupleDeclaration)): New function.
++ * dmd/expression.d (expandAliasThisTuples): Use foreachVar.
++ * dmd/foreachvar.d (VarWalker::visit (DeclarationExp)): Likewise.
++ * dmd/ob.d (genKill): Likewise.
++ (checkObErrors): Likewise.
++ * dmd/semantic2.d (Semantic2Visitor::visit (TupleDeclaration)): Visit
++ all tuple fields.
++
++2023-07-07 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-07 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/108842
++ * decl.cc (DeclVisitor::visit (VarDeclaration *)): Only emit scalar
++ manifest constants.
++ (get_symbol_decl): Don't generate CONST_DECL for non-scalar manifest
++ constants.
++ * imports.cc (ImportVisitor::visit (VarDeclaration *)): New method.
++
++2023-07-02 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-02 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110516
++ * intrinsics.cc (expand_volatile_load): Set TREE_SIDE_EFFECTS on the
++ expanded expression.
++ (expand_volatile_store): Likewise.
++
++2023-07-01 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-01 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110514
++ * decl.cc (get_symbol_decl): Set TREE_READONLY on certain kinds of
++ const and immutable variables.
++ * expr.cc (ExprVisitor::visit (ArrayLiteralExp *)): Set TREE_READONLY
++ on immutable dynamic array literals.
++
++2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110359
++ * d-convert.cc (convert_for_rvalue): Only apply the @safe boolean
++ conversion to boolean fields of a union.
++ (convert_for_condition): Call convert_for_rvalue in the default case.
++
++2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110113
++ * dmd/escape.d (checkMutableArguments): Always allocate new buffer for
++ computing escapeBy.
++
++2023-06-06 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ * dmd/MERGE: Merge upstream dmd 316b89f1e3.
++ * dmd/VERSION: Bump version to v2.100.2.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/d/d-convert.cc
++++ b/src/gcc/d/d-convert.cc
+@@ -620,7 +620,7 @@ convert_expr (tree exp, Type *etype, Type *totype)
+ return result ? result : convert (build_ctype (totype), exp);
+ }
+
+-/* Return a TREE represenwation of EXPR, whose type has been converted from
++/* Return a TREE representation of EXPR, whose type has been converted from
+ * ETYPE to TOTYPE, and is being used in an rvalue context. */
+
+ tree
+@@ -635,20 +635,27 @@ convert_for_rvalue (tree expr, Type *etype, Type *totype)
+ {
+ /* If casting from bool, the result is either 0 or 1, any other value
+ violates @safe code, so enforce that it is never invalid. */
+- if (CONSTANT_CLASS_P (expr))
+- result = d_truthvalue_conversion (expr);
+- else
++ for (tree ref = expr; TREE_CODE (ref) == COMPONENT_REF;
++ ref = TREE_OPERAND (ref, 0))
+ {
+- /* Reinterpret the boolean as an integer and test the first bit.
+- The generated code should end up being equivalent to:
++ /* If the expression is a field that's part of a union, reinterpret
++ the boolean as an integer and test the first bit. The generated
++ code should end up being equivalent to:
+ *cast(ubyte *)&expr & 1; */
+- machine_mode bool_mode = TYPE_MODE (TREE_TYPE (expr));
+- tree mtype = lang_hooks.types.type_for_mode (bool_mode, 1);
+- result = fold_build2 (BIT_AND_EXPR, mtype,
+- build_vconvert (mtype, expr),
+- build_one_cst (mtype));
++ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == UNION_TYPE)
++ {
++ machine_mode bool_mode = TYPE_MODE (TREE_TYPE (expr));
++ tree mtype = lang_hooks.types.type_for_mode (bool_mode, 1);
++ result = fold_build2 (BIT_AND_EXPR, mtype,
++ build_vconvert (mtype, expr),
++ build_one_cst (mtype));
++ break;
++ }
+ }
+
++ if (result == NULL_TREE)
++ result = d_truthvalue_conversion (expr);
++
+ result = convert (build_ctype (tbtype), result);
+ }
+
+@@ -845,7 +852,7 @@ convert_for_condition (tree expr, Type *type)
+ break;
+
+ default:
+- result = expr;
++ result = convert_for_rvalue (expr, type, type);
+ break;
+ }
+
+--- a/src/gcc/d/decl.cc
++++ b/src/gcc/d/decl.cc
+@@ -781,7 +781,7 @@ public:
+ {
+ /* Do not store variables we cannot take the address of,
+ but keep the values for purposes of debugging. */
+- if (!d->type->isscalar ())
++ if (d->type->isscalar () && !d->type->hasPointers ())
+ {
+ tree decl = get_symbol_decl (d);
+ d_pushdecl (decl);
+@@ -1199,6 +1199,20 @@ get_symbol_decl (Declaration *decl)
+ return decl->csym;
+ }
+
++ if (VarDeclaration *vd = decl->isVarDeclaration ())
++ {
++ /* CONST_DECL was initially intended for enumerals and may be used for
++ scalars in general, but not for aggregates. Here a non-constant
++ value is generated anyway so as its value can be used. */
++ if (!vd->canTakeAddressOf () && !vd->type->isscalar ())
++ {
++ gcc_assert (vd->_init && !vd->_init->isVoidInitializer ());
++ Expression *ie = initializerToExpression (vd->_init);
++ decl->csym = build_expr (ie, false);
++ return decl->csym;
++ }
++ }
++
+ /* Build the tree for the symbol. */
+ FuncDeclaration *fd = decl->isFuncDeclaration ();
+ if (fd)
+@@ -1246,24 +1260,30 @@ get_symbol_decl (Declaration *decl)
+ if (vd->storage_class & STCextern)
+ DECL_EXTERNAL (decl->csym) = 1;
+
+- /* CONST_DECL was initially intended for enumerals and may be used for
+- scalars in general, but not for aggregates. Here a non-constant
+- value is generated anyway so as the CONST_DECL only serves as a
+- placeholder for the value, however the DECL itself should never be
+- referenced in any generated code, or passed to the back-end. */
+- if (vd->storage_class & STCmanifest)
++ if (!vd->canTakeAddressOf ())
+ {
+ /* Cannot make an expression out of a void initializer. */
+- if (vd->_init && !vd->_init->isVoidInitializer ())
+- {
+- Expression *ie = initializerToExpression (vd->_init);
++ gcc_assert (vd->_init && !vd->_init->isVoidInitializer ());
++ /* Non-scalar manifest constants have already been dealt with. */
++ gcc_assert (vd->type->isscalar ());
+
+- if (!vd->type->isscalar ())
+- DECL_INITIAL (decl->csym) = build_expr (ie, false);
+- else
+- DECL_INITIAL (decl->csym) = build_expr (ie, true);
+- }
++ Expression *ie = initializerToExpression (vd->_init);
++ DECL_INITIAL (decl->csym) = build_expr (ie, true);
+ }
++
++ /* [type-qualifiers/const-and-immutable]
++
++ `immutable` applies to data that cannot change. Immutable data values,
++ once constructed, remain the same for the duration of the program's
++ execution. */
++ if (vd->isImmutable () && !vd->setInCtorOnly ())
++ TREE_READONLY (decl->csym) = 1;
++
++ /* `const` applies to data that cannot be changed by the const reference
++ to that data. It may, however, be changed by another reference to that
++ same data. */
++ if (vd->isConst () && !vd->isDataseg ())
++ TREE_READONLY (decl->csym) = 1;
+ }
+
+ /* Set the declaration mangled identifier if static. */
+--- a/src/gcc/d/dmd/MERGE
++++ b/src/gcc/d/dmd/MERGE
+@@ -1,4 +1,4 @@
+-76e3b41375e3e1cb4dbca692b587d8e916c0b49f
++316b89f1e3dffcad488c26f56f58c8adfcb84b26
+
+ The first line of this file holds the git revision number of the last
+ merge done from the dlang/dmd repository.
+--- a/src/gcc/d/dmd/VERSION
++++ b/src/gcc/d/dmd/VERSION
+@@ -1 +1 @@
+-v2.100.1
++v2.100.2
+--- a/src/gcc/d/dmd/canthrow.d
++++ b/src/gcc/d/dmd/canthrow.d
+@@ -270,18 +270,7 @@ private CT Dsymbol_canThrow(Dsymbol s, FuncDeclaration func, bool mustNotThrow)
+ }
+ else if (auto td = s.isTupleDeclaration())
+ {
+- for (size_t i = 0; i < td.objects.dim; i++)
+- {
+- RootObject o = (*td.objects)[i];
+- if (o.dyncast() == DYNCAST.expression)
+- {
+- Expression eo = cast(Expression)o;
+- if (auto se = eo.isDsymbolExp())
+- {
+- result |= Dsymbol_canThrow(se.s, func, mustNotThrow);
+- }
+- }
+- }
++ td.foreachVar(&symbolDg);
+ }
+ return result;
+ }
+--- a/src/gcc/d/dmd/declaration.d
++++ b/src/gcc/d/dmd/declaration.d
+@@ -656,23 +656,46 @@ extern (C++) final class TupleDeclaration : Declaration
+ override bool needThis()
+ {
+ //printf("TupleDeclaration::needThis(%s)\n", toChars());
+- for (size_t i = 0; i < objects.dim; i++)
++ return isexp ? foreachVar((s) { return s.needThis(); }) != 0 : false;
++ }
++
++ /***********************************************************
++ * Calls dg(Dsymbol) for each Dsymbol, which should be a VarDeclaration
++ * inside DsymbolExp (isexp == true).
++ * Params:
++ * dg = delegate to call for each Dsymbol
++ */
++ extern (D) void foreachVar(scope void delegate(Dsymbol) dg)
++ {
++ assert(isexp);
++ foreach (o; *objects)
+ {
+- RootObject o = (*objects)[i];
+- if (o.dyncast() == DYNCAST.expression)
+- {
+- Expression e = cast(Expression)o;
+- if (DsymbolExp ve = e.isDsymbolExp())
+- {
+- Declaration d = ve.s.isDeclaration();
+- if (d && d.needThis())
+- {
+- return true;
+- }
+- }
+- }
++ if (auto e = o.isExpression())
++ if (auto se = e.isDsymbolExp())
++ dg(se.s);
+ }
+- return false;
++ }
++
++ /***********************************************************
++ * Calls dg(Dsymbol) for each Dsymbol, which should be a VarDeclaration
++ * inside DsymbolExp (isexp == true).
++ * If dg returns !=0, stops and returns that value; otherwise returns 0.
++ * Params:
++ * dg = delegate to call for each Dsymbol
++ * Returns:
++ * last value returned by dg()
++ */
++ extern (D) int foreachVar(scope int delegate(Dsymbol) dg)
++ {
++ assert(isexp);
++ foreach (o; *objects)
++ {
++ if (auto e = o.isExpression())
++ if (auto se = e.isDsymbolExp())
++ if(auto ret = dg(se.s))
++ return ret;
++ }
++ return 0;
+ }
+
+ override inout(TupleDeclaration) isTupleDeclaration() inout
+@@ -1142,15 +1165,7 @@ extern (C++) class VarDeclaration : Declaration
+ // If this variable was really a tuple, set the offsets for the tuple fields
+ TupleDeclaration v2 = aliassym.isTupleDeclaration();
+ assert(v2);
+- for (size_t i = 0; i < v2.objects.dim; i++)
+- {
+- RootObject o = (*v2.objects)[i];
+- assert(o.dyncast() == DYNCAST.expression);
+- Expression e = cast(Expression)o;
+- assert(e.op == EXP.dSymbol);
+- DsymbolExp se = e.isDsymbolExp();
+- se.s.setFieldOffset(ad, fieldState, isunion);
+- }
++ v2.foreachVar((s) { s.setFieldOffset(ad, fieldState, isunion); });
+ return;
+ }
+
+--- a/src/gcc/d/dmd/dinterpret.d
++++ b/src/gcc/d/dmd/dinterpret.d
+@@ -2291,16 +2291,12 @@ public:
+ result = null;
+
+ // Reserve stack space for all tuple members
+- if (!td.objects)
+- return;
+- foreach (o; *td.objects)
++ td.foreachVar((s)
+ {
+- Expression ex = isExpression(o);
+- DsymbolExp ds = ex ? ex.isDsymbolExp() : null;
+- VarDeclaration v2 = ds ? ds.s.isVarDeclaration() : null;
++ VarDeclaration v2 = s.isVarDeclaration();
+ assert(v2);
+ if (v2.isDataseg() && !v2.isCTFE())
+- continue;
++ return 0;
+
+ ctfeGlobals.stack.push(v2);
+ if (v2._init)
+@@ -2310,7 +2306,7 @@ public:
+ {
+ einit = interpretRegion(ie.exp, istate, goal);
+ if (exceptionOrCant(einit))
+- return;
++ return 1;
+ }
+ else if (v2._init.isVoidInitializer())
+ {
+@@ -2320,11 +2316,12 @@ public:
+ {
+ e.error("declaration `%s` is not yet implemented in CTFE", e.toChars());
+ result = CTFEExp.cantexp;
+- return;
++ return 1;
+ }
+ setValue(v2, einit);
+ }
+- }
++ return 0;
++ });
+ return;
+ }
+ if (v.isStatic())
+--- a/src/gcc/d/dmd/dsymbolsem.d
++++ b/src/gcc/d/dmd/dsymbolsem.d
+@@ -650,7 +650,7 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
+ else
+ ti = dsym._init ? dsym._init.syntaxCopy() : null;
+
+- StorageClass storage_class = STC.temp | STC.local | dsym.storage_class;
++ StorageClass storage_class = STC.temp | dsym.storage_class;
+ if ((dsym.storage_class & STC.parameter) && (arg.storageClass & STC.parameter))
+ storage_class |= arg.storageClass;
+ auto v = new VarDeclaration(dsym.loc, arg.type, id, ti, storage_class);
+@@ -659,14 +659,6 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor
+
+ v.dsymbolSemantic(sc);
+
+- if (sc.scopesym)
+- {
+- //printf("adding %s to %s\n", v.toChars(), sc.scopesym.toChars());
+- if (sc.scopesym.members)
+- // Note this prevents using foreach() over members, because the limits can change
+- sc.scopesym.members.push(v);
+- }
+-
+ Expression e = new DsymbolExp(dsym.loc, v);
+ (*exps)[i] = e;
+ }
+@@ -6819,7 +6811,12 @@ bool determineFields(AggregateDeclaration ad)
+ return 1;
+
+ if (v.aliassym)
+- return 0; // If this variable was really a tuple, skip it.
++ {
++ // If this variable was really a tuple, process each element.
++ if (auto tup = v.aliassym.isTupleDeclaration())
++ return tup.foreachVar(tv => tv.apply(&func, ad));
++ return 0;
++ }
+
+ if (v.storage_class & (STC.static_ | STC.extern_ | STC.tls | STC.gshared | STC.manifest | STC.ctfe | STC.templateparameter))
+ return 0;
+--- a/src/gcc/d/dmd/dtoh.d
++++ b/src/gcc/d/dmd/dtoh.d
+@@ -877,7 +877,11 @@ public:
+ // Tuple field are expanded into multiple VarDeclarations
+ // (we'll visit them later)
+ if (vd.type && vd.type.isTypeTuple())
++ {
++ assert(vd.aliassym);
++ vd.toAlias().accept(this);
+ return;
++ }
+
+ if (vd.originalType && vd.type == AST.Type.tsize_t)
+ origType = vd.originalType;
+@@ -1667,6 +1671,13 @@ public:
+ assert(false, "This node type should be handled in the EnumDeclaration");
+ }
+
++ override void visit(AST.TupleDeclaration tup)
++ {
++ debug (Debug_DtoH) mixin(traceVisit!tup);
++
++ tup.foreachVar((s) { s.accept(this); });
++ }
++
+ /**
+ * Prints a member/parameter/variable declaration into `buf`.
+ *
+--- a/src/gcc/d/dmd/escape.d
++++ b/src/gcc/d/dmd/escape.d
+@@ -77,22 +77,7 @@ bool checkMutableArguments(Scope* sc, FuncDeclaration fd, TypeFunction tf,
+ bool isMutable; // true if reference to mutable
+ }
+
+- /* Store escapeBy as static data escapeByStorage so we can keep reusing the same
+- * arrays rather than reallocating them.
+- */
+- __gshared EscapeBy[] escapeByStorage;
+- auto escapeBy = escapeByStorage;
+- if (escapeBy.length < len)
+- {
+- auto newPtr = cast(EscapeBy*)mem.xrealloc(escapeBy.ptr, len * EscapeBy.sizeof);
+- // Clear the new section
+- memset(newPtr + escapeBy.length, 0, (len - escapeBy.length) * EscapeBy.sizeof);
+- escapeBy = newPtr[0 .. len];
+- escapeByStorage = escapeBy;
+- }
+- else
+- escapeBy = escapeBy[0 .. len];
+-
++ auto escapeBy = new EscapeBy[len];
+ const paramLength = tf.parameterList.length;
+
+ // Fill in escapeBy[] with arguments[], ethis, and outerVars[]
+@@ -212,13 +197,6 @@ bool checkMutableArguments(Scope* sc, FuncDeclaration fd, TypeFunction tf,
+ escape(i, eb, false);
+ }
+
+- /* Reset the arrays in escapeBy[] so we can reuse them next time through
+- */
+- foreach (ref eb; escapeBy)
+- {
+- eb.er.reset();
+- }
+-
+ return errors;
+ }
+
+--- a/src/gcc/d/dmd/expression.d
++++ b/src/gcc/d/dmd/expression.d
+@@ -348,14 +348,16 @@ int expandAliasThisTuples(Expressions* exps, size_t starti = 0)
+ if (TupleDeclaration td = exp.isAliasThisTuple)
+ {
+ exps.remove(u);
+- foreach (i, o; *td.objects)
++ size_t i;
++ td.foreachVar((s)
+ {
+- auto d = o.isExpression().isDsymbolExp().s.isDeclaration();
++ auto d = s.isDeclaration();
+ auto e = new DotVarExp(exp.loc, exp, d);
+ assert(d.type);
+ e.type = d.type;
+ exps.insert(u + i, e);
+- }
++ ++i;
++ });
+ version (none)
+ {
+ printf("expansion ->\n");
+--- a/src/gcc/d/dmd/foreachvar.d
++++ b/src/gcc/d/dmd/foreachvar.d
+@@ -75,19 +75,7 @@ void foreachVar(Expression e, void delegate(VarDeclaration) dgVar)
+ if (!v)
+ return;
+ if (TupleDeclaration td = v.toAlias().isTupleDeclaration())
+- {
+- if (!td.objects)
+- return;
+- foreach (o; *td.objects)
+- {
+- Expression ex = isExpression(o);
+- DsymbolExp s = ex ? ex.isDsymbolExp() : null;
+- assert(s);
+- VarDeclaration v2 = s.s.isVarDeclaration();
+- assert(v2);
+- dgVar(v2);
+- }
+- }
++ td.foreachVar((s) { dgVar(s.isVarDeclaration()); });
+ else
+ dgVar(v);
+ Dsymbol s = v.toAlias();
+--- a/src/gcc/d/dmd/ob.d
++++ b/src/gcc/d/dmd/ob.d
+@@ -1407,16 +1407,7 @@ void genKill(ref ObState obstate, ObNode* ob)
+ }
+ else if (auto td = s.isTupleDeclaration())
+ {
+- foreach (o; *td.objects)
+- {
+- if (auto eo = o.isExpression())
+- {
+- if (auto se = eo.isDsymbolExp())
+- {
+- Dsymbol_visit(se.s);
+- }
+- }
+- }
++ td.foreachVar(&Dsymbol_visit);
+ }
+ }
+
+@@ -2107,16 +2098,7 @@ void checkObErrors(ref ObState obstate)
+ }
+ else if (auto td = s.isTupleDeclaration())
+ {
+- foreach (o; *td.objects)
+- {
+- if (auto eo = o.isExpression())
+- {
+- if (auto se = eo.isDsymbolExp())
+- {
+- Dsymbol_visit(se.s);
+- }
+- }
+- }
++ td.foreachVar(&Dsymbol_visit);
+ }
+ }
+
+--- a/src/gcc/d/dmd/semantic2.d
++++ b/src/gcc/d/dmd/semantic2.d
+@@ -677,6 +677,11 @@ private extern(C++) final class Semantic2Visitor : Visitor
+ {
+ visit(cast(AggregateDeclaration) cd);
+ }
++
++ override void visit(TupleDeclaration td)
++ {
++ td.foreachVar((s) { s.accept(this); });
++ }
+ }
+
+ /**
+--- a/src/gcc/d/expr.cc
++++ b/src/gcc/d/expr.cc
+@@ -2708,6 +2708,10 @@ public:
+ if (tb->ty == TY::Tarray)
+ ctor = d_array_value (type, size_int (e->elements->length), ctor);
+
++ /* Immutable literals can be placed in rodata. */
++ if (tb->isImmutable ())
++ TREE_READONLY (decl) = 1;
++
+ d_pushdecl (decl);
+ rest_of_decl_compilation (decl, 1, 0);
+ }
+--- a/src/gcc/d/imports.cc
++++ b/src/gcc/d/imports.cc
+@@ -127,6 +127,15 @@ public:
+ this->result_ = this->make_import (TYPE_STUB_DECL (type));
+ }
+
++ void visit (VarDeclaration *d)
++ {
++ /* Not all kinds of manifest constants create a CONST_DECL. */
++ if (!d->canTakeAddressOf () && !d->type->isscalar ())
++ return;
++
++ visit ((Declaration *) d);
++ }
++
+ /* For now, ignore importing other kinds of dsymbols. */
+ void visit (ScopeDsymbol *)
+ {
+--- a/src/gcc/d/intrinsics.cc
++++ b/src/gcc/d/intrinsics.cc
+@@ -721,6 +721,7 @@ expand_volatile_load (tree callexp)
+ tree type = build_qualified_type (TREE_TYPE (ptrtype), TYPE_QUAL_VOLATILE);
+ tree result = indirect_ref (type, ptr);
+ TREE_THIS_VOLATILE (result) = 1;
++ TREE_SIDE_EFFECTS (result) = 1;
+
+ return result;
+ }
+@@ -748,6 +749,7 @@ expand_volatile_store (tree callexp)
+ tree type = build_qualified_type (TREE_TYPE (ptrtype), TYPE_QUAL_VOLATILE);
+ tree result = indirect_ref (type, ptr);
+ TREE_THIS_VOLATILE (result) = 1;
++ TREE_SIDE_EFFECTS (result) = 1;
+
+ /* (*(volatile T *) ptr) = value; */
+ tree value = CALL_EXPR_ARG (callexp, 1);
+--- a/src/gcc/fortran/ChangeLog
++++ b/src/gcc/fortran/ChangeLog
+@@ -1,3 +1,72 @@
++2023-08-06 Steve Kargl <kargl@gcc.gnu.org>
++
++ Backported from master:
++ 2022-12-18 Steve Kargl <kargl@gcc.gnu.org>
++
++ PR fortran/107397
++ * decl.cc (add_init_expr_to_sym): Add check with new error message.
++
++2023-07-20 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-17 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/95947
++ PR fortran/110658
++ * trans-expr.cc (gfc_conv_procedure_call): For intrinsic procedures
++ whose result characteristics depends on the first argument and which
++ can be of type character, the character length will not be deferred.
++
++2023-07-14 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-11 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/110288
++ * symbol.cc (gfc_copy_formal_args_intr): When deriving the formal
++ argument attributes from the actual ones for intrinsic procedure
++ calls, take special care of CHARACTER arguments that we do not
++ wrongly treat them formally as deferred-length.
++
++2023-07-08 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-08 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/110585
++ * arith.cc (gfc_compare_expr): Handle equality comparison of constant
++ complex gfc_expr arguments.
++
++2023-06-09 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-06-09 Jakub Jelinek <jakub@redhat.com>
++
++ PR fortran/96024
++ * primary.cc (gfc_convert_to_structure_constructor): Only do
++ constant string ctor length verification and truncation/padding
++ if constant length has INTEGER type.
++
++2023-06-04 Steve Kargl <kargl@gcc.gnu.org>
++
++ Backported from master:
++ 2023-06-02 Steve Kargl <kargl@gcc.gnu.org>
++
++ PR fortran/100607
++ * resolve.cc (resolve_select_rank): Remove duplicate error.
++ (resolve_fl_var_and_proc): Prevent NULL pointer dereference and
++ suppress error message for temporary.
++
++2023-05-20 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-05-15 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/109846
++ * expr.cc (gfc_check_vardef_context): Check appropriate pointer
++ attribute for CLASS vs. non-CLASS function result in variable
++ definition context.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/fortran/arith.cc
++++ b/src/gcc/fortran/arith.cc
+@@ -1080,6 +1080,11 @@ gfc_compare_expr (gfc_expr *op1, gfc_expr *op2, gfc_intrinsic_op op)
+ || (op1->value.logical && !op2->value.logical));
+ break;
+
++ case BT_COMPLEX:
++ gcc_assert (op == INTRINSIC_EQ);
++ rc = mpc_cmp (op1->value.complex, op2->value.complex);
++ break;
++
+ default:
+ gfc_internal_error ("gfc_compare_expr(): Bad basic type");
+ }
+--- a/src/gcc/fortran/decl.cc
++++ b/src/gcc/fortran/decl.cc
+@@ -2220,6 +2220,14 @@ add_init_expr_to_sym (const char *name, gfc_expr **initp, locus *var_locus)
+ sym->ts.f90_type = init->ts.f90_type;
+ }
+
++ /* Catch the case: type(t), parameter :: x = z'1'. */
++ if (sym->ts.type == BT_DERIVED && init->ts.type == BT_BOZ)
++ {
++ gfc_error ("Entity %qs at %L is incompatible with a BOZ "
++ "literal constant", name, &sym->declared_at);
++ return false;
++ }
++
+ /* Add initializer. Make sure we keep the ranks sane. */
+ if (sym->attr.dimension && init->rank == 0)
+ {
+--- a/src/gcc/fortran/expr.cc
++++ b/src/gcc/fortran/expr.cc
+@@ -6254,7 +6254,7 @@ gfc_check_vardef_context (gfc_expr* e, bool pointer, bool alloc_obj,
+ && !(sym->attr.flavor == FL_PROCEDURE && sym == sym->result)
+ && !(sym->attr.flavor == FL_PROCEDURE && sym->attr.proc_pointer)
+ && !(sym->attr.flavor == FL_PROCEDURE
+- && sym->attr.function && sym->attr.pointer))
++ && sym->attr.function && attr.pointer))
+ {
+ if (context)
+ gfc_error ("%qs in variable definition context (%s) at %L is not"
+--- a/src/gcc/fortran/primary.cc
++++ b/src/gcc/fortran/primary.cc
+@@ -3196,10 +3196,11 @@ gfc_convert_to_structure_constructor (gfc_expr *e, gfc_symbol *sym, gfc_expr **c
+ goto cleanup;
+
+ /* For a constant string constructor, make sure the length is
+- correct; truncate of fill with blanks if needed. */
++ correct; truncate or fill with blanks if needed. */
+ if (this_comp->ts.type == BT_CHARACTER && !this_comp->attr.allocatable
+ && this_comp->ts.u.cl && this_comp->ts.u.cl->length
+ && this_comp->ts.u.cl->length->expr_type == EXPR_CONSTANT
++ && this_comp->ts.u.cl->length->ts.type == BT_INTEGER
+ && actual->expr->ts.type == BT_CHARACTER
+ && actual->expr->expr_type == EXPR_CONSTANT)
+ {
+--- a/src/gcc/fortran/resolve.cc
++++ b/src/gcc/fortran/resolve.cc
+@@ -9923,11 +9923,6 @@ resolve_select_rank (gfc_code *code, gfc_namespace *old_ns)
+ || gfc_expr_attr (code->expr1).pointer))
+ gfc_error ("RANK (*) at %L cannot be used with the pointer or "
+ "allocatable selector at %L", &c->where, &code->expr1->where);
+-
+- if (case_value == -1 && (gfc_expr_attr (code->expr1).allocatable
+- || gfc_expr_attr (code->expr1).pointer))
+- gfc_error ("RANK (*) at %L cannot be used with the pointer or "
+- "allocatable selector at %L", &c->where, &code->expr1->where);
+ }
+
+ /* Add EXEC_SELECT to switch on rank. */
+@@ -12913,7 +12908,10 @@ resolve_fl_var_and_proc (gfc_symbol *sym, int mp_flag)
+
+ if (allocatable)
+ {
+- if (dimension && as->type != AS_ASSUMED_RANK)
++ if (dimension
++ && as
++ && as->type != AS_ASSUMED_RANK
++ && !sym->attr.select_rank_temporary)
+ {
+ gfc_error ("Allocatable array %qs at %L must have a deferred "
+ "shape or assumed rank", sym->name, &sym->declared_at);
+--- a/src/gcc/fortran/symbol.cc
++++ b/src/gcc/fortran/symbol.cc
+@@ -4719,6 +4719,13 @@ gfc_copy_formal_args_intr (gfc_symbol *dest, gfc_intrinsic_sym *src,
+ formal_arg->sym->attr.flavor = FL_VARIABLE;
+ formal_arg->sym->attr.dummy = 1;
+
++ /* Do not treat an actual deferred-length character argument wrongly
++ as a template for the formal argument. */
++ if (formal_arg->sym->ts.type == BT_CHARACTER
++ && !(formal_arg->sym->attr.allocatable
++ || formal_arg->sym->attr.pointer))
++ formal_arg->sym->ts.deferred = false;
++
+ if (formal_arg->sym->ts.type == BT_CHARACTER)
+ formal_arg->sym->ts.u.cl = gfc_new_charlen (gfc_current_ns, NULL);
+
+--- a/src/gcc/fortran/trans-expr.cc
++++ b/src/gcc/fortran/trans-expr.cc
+@@ -7428,7 +7428,12 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
+ (and other intrinsics?) and dummy functions. In the case of SPREAD,
+ we take the character length of the first argument for the result.
+ For dummies, we have to look through the formal argument list for
+- this function and use the character length found there.*/
++ this function and use the character length found there.
++ Likewise, we handle the case of deferred-length character dummy
++ arguments to intrinsics that determine the characteristics of
++ the result, which cannot be deferred-length. */
++ if (expr->value.function.isym)
++ ts.deferred = false;
+ if (ts.deferred)
+ cl.backend_decl = gfc_create_var (gfc_charlen_type_node, "slen");
+ else if (!sym->attr.dummy)
+--- a/src/gcc/fwprop.cc
++++ b/src/gcc/fwprop.cc
+@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
+ #include "coretypes.h"
+ #include "backend.h"
+ #include "rtl.h"
++#include "rtlanal.h"
+ #include "df.h"
+ #include "rtl-ssa.h"
+
+@@ -353,21 +354,6 @@ reg_single_def_p (rtx x)
+ return REG_P (x) && crtl->ssa->single_dominating_def (REGNO (x));
+ }
+
+-/* Return true if X contains a paradoxical subreg. */
+-
+-static bool
+-contains_paradoxical_subreg_p (rtx x)
+-{
+- subrtx_var_iterator::array_type array;
+- FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
+- {
+- x = *iter;
+- if (SUBREG_P (x) && paradoxical_subreg_p (x))
+- return true;
+- }
+- return false;
+-}
+-
+ /* Try to substitute (set DEST SRC), which defines DEF, into note NOTE of
+ USE_INSN. Return the number of substitutions on success, otherwise return
+ -1 and leave USE_INSN unchanged.
+--- a/src/gcc/genmatch.cc
++++ b/src/gcc/genmatch.cc
+@@ -2548,7 +2548,8 @@ expr::gen_transform (FILE *f, int indent, const char *dest, bool gimple,
+ {
+ fprintf_indent (f, indent, "if (TREE_TYPE (_o%d[0]) != %s)\n",
+ depth, type);
+- indent += 2;
++ fprintf_indent (f, indent + 2, "{\n");
++ indent += 4;
+ }
+ if (opr->kind == id_base::CODE)
+ fprintf_indent (f, indent, "_r%d = fold_build%d_loc (loc, %s, %s",
+@@ -2571,7 +2572,8 @@ expr::gen_transform (FILE *f, int indent, const char *dest, bool gimple,
+ }
+ if (*opr == CONVERT_EXPR)
+ {
+- indent -= 2;
++ fprintf_indent (f, indent - 2, "}\n");
++ indent -= 4;
+ fprintf_indent (f, indent, "else\n");
+ fprintf_indent (f, indent, " _r%d = _o%d[0];\n", depth, depth);
+ }
+--- a/src/gcc/gimple-fold.cc
++++ b/src/gcc/gimple-fold.cc
+@@ -7770,12 +7770,11 @@ get_base_constructor (tree base, poly_int64_pod *bit_offset,
+ }
+ }
+
+-/* CTOR is CONSTRUCTOR of an array type. Fold a reference of SIZE bits
+- to the memory at bit OFFSET. When non-null, TYPE is the expected
+- type of the reference; otherwise the type of the referenced element
+- is used instead. When SIZE is zero, attempt to fold a reference to
+- the entire element which OFFSET refers to. Increment *SUBOFF by
+- the bit offset of the accessed element. */
++/* CTOR is a CONSTRUCTOR of an array or vector type. Fold a reference of SIZE
++ bits to the memory at bit OFFSET. If non-null, TYPE is the expected type of
++ the reference; otherwise the type of the referenced element is used instead.
++ When SIZE is zero, attempt to fold a reference to the entire element OFFSET
++ refers to. Increment *SUBOFF by the bit offset of the accessed element. */
+
+ static tree
+ fold_array_ctor_reference (tree type, tree ctor,
+@@ -7940,13 +7939,11 @@ fold_array_ctor_reference (tree type, tree ctor,
+ return type ? build_zero_cst (type) : NULL_TREE;
+ }
+
+-/* CTOR is CONSTRUCTOR of an aggregate or vector. Fold a reference
+- of SIZE bits to the memory at bit OFFSET. When non-null, TYPE
+- is the expected type of the reference; otherwise the type of
+- the referenced member is used instead. When SIZE is zero,
+- attempt to fold a reference to the entire member which OFFSET
+- refers to; in this case. Increment *SUBOFF by the bit offset
+- of the accessed member. */
++/* CTOR is a CONSTRUCTOR of a record or union type. Fold a reference of SIZE
++ bits to the memory at bit OFFSET. If non-null, TYPE is the expected type of
++ the reference; otherwise the type of the referenced member is used instead.
++ When SIZE is zero, attempt to fold a reference to the entire member OFFSET
++ refers to. Increment *SUBOFF by the bit offset of the accessed member. */
+
+ static tree
+ fold_nonarray_ctor_reference (tree type, tree ctor,
+@@ -7958,8 +7955,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
+ unsigned HOST_WIDE_INT cnt;
+ tree cfield, cval;
+
+- FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield,
+- cval)
++ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
+ {
+ tree byte_offset = DECL_FIELD_OFFSET (cfield);
+ tree field_offset = DECL_FIELD_BIT_OFFSET (cfield);
+@@ -8031,6 +8027,19 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
+ return NULL_TREE;
+
+ offset_int inner_offset = offset_int (offset) - bitoffset;
++
++ /* Integral bit-fields are left-justified on big-endian targets, so
++ we must arrange for native_encode_int to start at their MSB. */
++ if (DECL_BIT_FIELD (cfield) && INTEGRAL_TYPE_P (TREE_TYPE (cfield)))
++ {
++ if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
++ return NULL_TREE;
++ const unsigned int encoding_size
++ = GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (TREE_TYPE (cfield)));
++ if (BYTES_BIG_ENDIAN)
++ inner_offset += encoding_size - wi::to_offset (field_size);
++ }
++
+ return fold_ctor_reference (type, cval,
+ inner_offset.to_uhwi (), size,
+ from_decl, suboff);
+@@ -8043,7 +8052,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
+ return build_zero_cst (type);
+ }
+
+-/* CTOR is value initializing memory. Fold a reference of TYPE and
++/* CTOR is a value initializing memory. Fold a reference of TYPE and
+ bit size POLY_SIZE to the memory at bit POLY_OFFSET. When POLY_SIZE
+ is zero, attempt to fold a reference to the entire subobject
+ which OFFSET refers to. This is used when folding accesses to
+@@ -8084,7 +8093,8 @@ fold_ctor_reference (tree type, tree ctor, const poly_uint64 &poly_offset,
+ }
+ return ret;
+ }
+- /* For constants and byte-aligned/sized reads try to go through
++
++ /* For constants and byte-aligned/sized reads, try to go through
+ native_encode/interpret. */
+ if (CONSTANT_CLASS_P (ctor)
+ && BITS_PER_UNIT == 8
+@@ -8100,7 +8110,12 @@ fold_ctor_reference (tree type, tree ctor, const poly_uint64 &poly_offset,
+ if (len > 0)
+ return native_interpret_expr (type, buf, len);
+ }
+- if (TREE_CODE (ctor) == CONSTRUCTOR)
++
++ /* For constructors, try first a recursive local processing, but in any case
++ this requires the native storage order. */
++ if (TREE_CODE (ctor) == CONSTRUCTOR
++ && !(AGGREGATE_TYPE_P (TREE_TYPE (ctor))
++ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (ctor))))
+ {
+ unsigned HOST_WIDE_INT dummy = 0;
+ if (!suboff)
+@@ -8115,9 +8130,9 @@ fold_ctor_reference (tree type, tree ctor, const poly_uint64 &poly_offset,
+ ret = fold_nonarray_ctor_reference (type, ctor, offset, size,
+ from_decl, suboff);
+
+- /* Fall back to native_encode_initializer. Needs to be done
+- only in the outermost fold_ctor_reference call (because it itself
+- recurses into CONSTRUCTORs) and doesn't update suboff. */
++ /* Otherwise fall back to native_encode_initializer. This may be done
++ only from the outermost fold_ctor_reference call (because it itself
++ recurses into CONSTRUCTORs and doesn't update suboff). */
+ if (ret == NULL_TREE
+ && suboff == &dummy
+ && BITS_PER_UNIT == 8
+--- a/src/gcc/gimple-range-gori.cc
++++ b/src/gcc/gimple-range-gori.cc
+@@ -880,6 +880,7 @@ gori_compute::logical_combine (irange &r, enum tree_code code,
+ res = false;
+ if (idx)
+ tracer.trailer (idx, "logical_combine", res, NULL_TREE, r);
++ return res;
+ }
+
+ switch (code)
+--- a/src/gcc/gimple-ssa-store-merging.cc
++++ b/src/gcc/gimple-ssa-store-merging.cc
+@@ -4605,12 +4605,13 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
+ }
+ else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
+ {
+- const unsigned HOST_WIDE_INT imask
+- = (HOST_WIDE_INT_1U << info->bitsize) - 1;
++ wide_int imask
++ = wi::mask (info->bitsize, false,
++ TYPE_PRECISION (TREE_TYPE (tem)));
+ tem = gimple_build (&seq, loc,
+ BIT_AND_EXPR, TREE_TYPE (tem), tem,
+- build_int_cst (TREE_TYPE (tem),
+- imask));
++ wide_int_to_tree (TREE_TYPE (tem),
++ imask));
+ }
+ const HOST_WIDE_INT shift
+ = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
+--- a/src/gcc/gimplify.cc
++++ b/src/gcc/gimplify.cc
+@@ -6847,7 +6847,12 @@ gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
+ stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
+ inputs, outputs, clobbers, labels);
+
+- gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr) || noutputs == 0);
++ /* asm is volatile if it was marked by the user as volatile or
++ there are no outputs or this is an asm goto. */
++ gimple_asm_set_volatile (stmt,
++ ASM_VOLATILE_P (expr)
++ || noutputs == 0
++ || labels);
+ gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
+ gimple_asm_set_inline (stmt, ASM_INLINE_P (expr));
+
+--- a/src/gcc/go/ChangeLog
++++ b/src/gcc/go/ChangeLog
+@@ -1,3 +1,11 @@
++2023-06-28 Paul E. Murphy <murphyp@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-22 Paul E. Murphy <murphyp@linux.ibm.com>
++
++ * go-backend.cc [TARGET_AIX]: Rename and update usage to TARGET_AIX_OS.
++ * go-lang.cc: Likewise.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/go/go-backend.cc
++++ b/src/gcc/go/go-backend.cc
+@@ -45,8 +45,8 @@ along with GCC; see the file COPYING3. If not see
+ #define GO_EXPORT_SECTION_NAME ".go_export"
+ #endif
+
+-#ifndef TARGET_AIX
+-#define TARGET_AIX 0
++#ifndef TARGET_AIX_OS
++#define TARGET_AIX_OS 0
+ #endif
+
+ /* This file holds all the cases where the Go frontend needs
+@@ -107,7 +107,7 @@ go_write_export_data (const char *bytes, unsigned int size)
+ {
+ gcc_assert (targetm_common.have_named_sections);
+ sec = get_section (GO_EXPORT_SECTION_NAME,
+- TARGET_AIX ? SECTION_EXCLUDE : SECTION_DEBUG,
++ TARGET_AIX_OS ? SECTION_EXCLUDE : SECTION_DEBUG,
+ NULL);
+ }
+
+--- a/src/gcc/go/go-lang.cc
++++ b/src/gcc/go/go-lang.cc
+@@ -39,8 +39,8 @@ along with GCC; see the file COPYING3. If not see
+ #include "go-c.h"
+ #include "go-gcc.h"
+
+-#ifndef TARGET_AIX
+-#define TARGET_AIX 0
++#ifndef TARGET_AIX_OS
++#define TARGET_AIX_OS 0
+ #endif
+
+ /* Language-dependent contents of a type. */
+@@ -119,9 +119,9 @@ go_langhook_init (void)
+ args.compiling_runtime = go_compiling_runtime;
+ args.debug_escape_level = go_debug_escape_level;
+ args.debug_escape_hash = go_debug_escape_hash;
+- args.nil_check_size_threshold = TARGET_AIX ? -1 : 4096;
++ args.nil_check_size_threshold = TARGET_AIX_OS ? -1 : 4096;
+ args.debug_optimization = go_debug_optimization;
+- args.need_eqtype = TARGET_AIX ? true : false;
++ args.need_eqtype = TARGET_AIX_OS ? true : false;
+ args.linemap = go_get_linemap();
+ args.backend = go_get_backend();
+ go_create_gogo (&args);
+--- a/src/gcc/go/gofrontend/expressions.cc
++++ b/src/gcc/go/gofrontend/expressions.cc
+@@ -12325,7 +12325,8 @@ Call_expression::intrinsify(Gogo* gogo,
+ return Runtime::make_call(code, loc, 3, a1, a2, a3);
+ }
+ }
+- else if (package == "internal/abi")
++ else if (package == "internal/abi"
++ || package == "bootstrap/internal/abi") // for bootstrapping gc
+ {
+ if (is_method)
+ return NULL;
+--- a/src/gcc/go/gofrontend/gogo.cc
++++ b/src/gcc/go/gofrontend/gogo.cc
+@@ -3331,6 +3331,9 @@ class Create_function_descriptors : public Traverse
+ int
+ expression(Expression**);
+
++ static bool
++ skip_descriptor(Gogo* gogo, const Named_object*);
++
+ private:
+ Gogo* gogo_;
+ };
+@@ -3341,6 +3344,9 @@ class Create_function_descriptors : public Traverse
+ int
+ Create_function_descriptors::function(Named_object* no)
+ {
++ if (Create_function_descriptors::skip_descriptor(this->gogo_, no))
++ return TRAVERSE_CONTINUE;
++
+ if (no->is_function()
+ && no->func_value()->enclosing() == NULL
+ && !no->func_value()->is_method()
+@@ -3428,6 +3434,28 @@ Create_function_descriptors::expression(Expression** pexpr)
+ return TRAVERSE_CONTINUE;
+ }
+
++// The gc compiler has some special cases that it always compiles as
++// intrinsics. For those we don't want to generate a function
++// descriptor, as there will be no code for it to refer to.
++
++bool
++Create_function_descriptors::skip_descriptor(Gogo* gogo,
++ const Named_object* no)
++{
++ const std::string& pkgpath(no->package() == NULL
++ ? gogo->pkgpath()
++ : no->package()->pkgpath());
++
++ // internal/abi is the standard library package,
++ // bootstrap/internal/abi is the name used when bootstrapping the gc
++ // compiler.
++
++ return ((pkgpath == "internal/abi"
++ || pkgpath == "bootstrap/internal/abi")
++ && (no->name() == "FuncPCABI0"
++ || no->name() == "FuncPCABIInternal"));
++}
++
+ // Create function descriptors as needed. We need a function
+ // descriptor for all exported functions and for all functions that
+ // are referenced without being called.
+@@ -3449,7 +3477,8 @@ Gogo::create_function_descriptors()
+ if (no->is_function_declaration()
+ && !no->func_declaration_value()->type()->is_method()
+ && !Linemap::is_predeclared_location(no->location())
+- && !Gogo::is_hidden_name(no->name()))
++ && !Gogo::is_hidden_name(no->name())
++ && !Create_function_descriptors::skip_descriptor(this, no))
+ fndecls.push_back(no);
+ }
+ for (std::vector<Named_object*>::const_iterator p = fndecls.begin();
+--- a/src/gcc/match.pd
++++ b/src/gcc/match.pd
+@@ -1723,7 +1723,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
+ (simplify
+ (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
+- (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
++ (bit_ior (bit_and @0 @2) (bit_and! @1 @2)))
+
+ /* Combine successive equal operations with constants. */
+ (for bitop (bit_and bit_ior bit_xor)
+@@ -1732,7 +1732,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ (if (!CONSTANT_CLASS_P (@0))
+ /* This is the canonical form regardless of whether (bitop @1 @2) can be
+ folded to a constant. */
+- (bitop @0 (bitop @1 @2))
++ (bitop @0 (bitop! @1 @2))
+ /* In this case we have three constants and (bitop @0 @1) doesn't fold
+ to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
+ the values involved are such that the operation can't be decided at
+@@ -2635,13 +2635,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ forever if something doesn't simplify into a constant. */
+ (if (!CONSTANT_CLASS_P (@0))
+ (if (outer_op == PLUS_EXPR)
+- (plus (view_convert @0) (inner_op @2 (view_convert @1)))
+- (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
++ (plus (view_convert @0) (inner_op! @2 (view_convert @1)))
++ (minus (view_convert @0) (neg_inner_op! @2 (view_convert @1)))))
+ (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+ (if (outer_op == PLUS_EXPR)
+- (view_convert (plus @0 (inner_op (view_convert @2) @1)))
+- (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
++ (view_convert (plus @0 (inner_op! (view_convert @2) @1)))
++ (view_convert (minus @0 (neg_inner_op! (view_convert @2) @1))))
+ /* If the constant operation overflows we cannot do the transform
+ directly as we would introduce undefined overflow, for example
+ with (a - 1) + INT_MIN. */
+@@ -2672,10 +2672,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
+ forever if something doesn't simplify into a constant. */
+ (if (!CONSTANT_CLASS_P (@0))
+- (minus (outer_op (view_convert @1) @2) (view_convert @0)))
++ (minus (outer_op! (view_convert @1) @2) (view_convert @0)))
+ (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+- (view_convert (minus (outer_op @1 (view_convert @2)) @0))
++ (view_convert (minus (outer_op! @1 (view_convert @2)) @0))
+ (if (types_match (type, @0))
+ (with { tree cst = const_binop (outer_op, type, @1, @2); }
+ (if (cst && !TREE_OVERFLOW (cst))
+@@ -2691,10 +2691,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
+ forever if something doesn't simplify into a constant. */
+ (if (!CONSTANT_CLASS_P (@0))
+- (plus (view_convert @0) (minus @1 (view_convert @2))))
++ (plus (view_convert @0) (minus! @1 (view_convert @2))))
+ (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+- (view_convert (plus @0 (minus (view_convert @1) @2)))
++ (view_convert (plus @0 (minus! (view_convert @1) @2)))
+ (if (types_match (type, @0))
+ (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
+ (if (cst && !TREE_OVERFLOW (cst))
+@@ -3711,19 +3711,19 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ int inside_ptr = POINTER_TYPE_P (inside_type);
+ int inside_float = FLOAT_TYPE_P (inside_type);
+ int inside_vec = VECTOR_TYPE_P (inside_type);
+- unsigned int inside_prec = TYPE_PRECISION (inside_type);
++ unsigned int inside_prec = element_precision (inside_type);
+ int inside_unsignedp = TYPE_UNSIGNED (inside_type);
+ int inter_int = INTEGRAL_TYPE_P (inter_type);
+ int inter_ptr = POINTER_TYPE_P (inter_type);
+ int inter_float = FLOAT_TYPE_P (inter_type);
+ int inter_vec = VECTOR_TYPE_P (inter_type);
+- unsigned int inter_prec = TYPE_PRECISION (inter_type);
++ unsigned int inter_prec = element_precision (inter_type);
+ int inter_unsignedp = TYPE_UNSIGNED (inter_type);
+ int final_int = INTEGRAL_TYPE_P (type);
+ int final_ptr = POINTER_TYPE_P (type);
+ int final_float = FLOAT_TYPE_P (type);
+ int final_vec = VECTOR_TYPE_P (type);
+- unsigned int final_prec = TYPE_PRECISION (type);
++ unsigned int final_prec = element_precision (type);
+ int final_unsignedp = TYPE_UNSIGNED (type);
+ }
+ (switch
+@@ -4186,6 +4186,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ /* (v ? w : 0) ? a : b is just (v & w) ? a : b
+ Currently disabled after pass lvec because ARM understands
+ VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
++#if GIMPLE
++/* These can only be done in gimple as fold likes to convert:
++ (CMP) & N into (CMP) ? N : 0
++ and we try to match the same pattern again and again. */
+ (simplify
+ (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
+@@ -4220,6 +4224,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
+ (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
++#endif
+
+ /* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
+ types are compatible. */
+--- a/src/gcc/omp-expand.cc
++++ b/src/gcc/omp-expand.cc
+@@ -2564,7 +2564,8 @@ expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
+ tree factor = fd->factor;
+ gcond *cond_stmt
+ = expand_omp_build_cond (gsi, NE_EXPR, factor,
+- build_zero_cst (TREE_TYPE (factor)));
++ build_zero_cst (TREE_TYPE (factor)),
++ true);
+ edge e = split_block (gsi_bb (*gsi), cond_stmt);
+ basic_block bb0 = e->src;
+ e->flags = EDGE_TRUE_VALUE;
+--- a/src/gcc/rtlanal.cc
++++ b/src/gcc/rtlanal.cc
+@@ -6990,3 +6990,18 @@ vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
+ }
+ return false;
+ }
++
++/* Return true if X contains a paradoxical subreg. */
++
++bool
++contains_paradoxical_subreg_p (rtx x)
++{
++ subrtx_var_iterator::array_type array;
++ FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
++ {
++ x = *iter;
++ if (SUBREG_P (x) && paradoxical_subreg_p (x))
++ return true;
++ }
++ return false;
++}
+--- a/src/gcc/rtlanal.h
++++ b/src/gcc/rtlanal.h
+@@ -338,4 +338,6 @@ vec_series_highpart_p (machine_mode result_mode, machine_mode op_mode,
+ bool
+ vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel);
+
++bool
++contains_paradoxical_subreg_p (rtx x);
+ #endif
+--- a/src/gcc/testsuite/ChangeLog
++++ b/src/gcc/testsuite/ChangeLog
+@@ -1,3 +1,4679 @@
++2023-10-07 Andrew Pinski <pinskia@gmail.com>
++
++ Backported from master:
++ 2023-10-06 Andrew Pinski <pinskia@gmail.com>
++
++ PR middle-end/111699
++ * gcc.c-torture/compile/pr111699-1.c: New test.
++
++2023-10-02 Pat Haugen <pthaugen@linux.ibm.com>
++
++ Backported from master:
++ 2023-09-19 Pat Haugen <pthaugen@linux.ibm.com>
++
++ * gcc.target/powerpc/clone1.c: Add xfails.
++ * gcc.target/powerpc/clone3.c: Likewise.
++ * gcc.target/powerpc/mod-1.c: Update scan strings and add xfails.
++ * gcc.target/powerpc/mod-2.c: Likewise.
++ * gcc.target/powerpc/p10-vdivq-vmodq.c: Add xfails.
++
++2023-09-29 Wilco Dijkstra <wilco.dijkstra@arm.com>
++
++ Backported from master:
++ 2023-09-28 Wilco Dijkstra <wilco.dijkstra@arm.com>
++
++ PR target/111121
++ * gcc.target/aarch64/mops_4.c: Add memmove testcases.
++
++2023-09-26 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gnat.dg/opt102.adb:New test.
++ * gnat.dg/opt102_pkg.adb, gnat.dg/opt102_pkg.ads: New helper.
++
++2023-09-20 Richard Sandiford <richard.sandiford@arm.com>
++
++ Backported from master:
++ 2023-09-15 Richard Sandiford <richard.sandiford@arm.com>
++
++ PR target/111411
++ * gcc.dg/rtl/aarch64/pr111411.c: New test.
++
++2023-09-12 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/111340
++ * gcc.target/i386/pr111340.c: New test.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * gcc.target/aarch64/stack-protector-8.c: New test.
++ * gcc.target/aarch64/stack-protector-9.c: Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * gcc.target/aarch64/sve/pcs/stack_clash_3.c: Avoid redundant probes.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
++ to be at offset 1024 rather than offset 0.
++ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
++ * gcc.target/aarch64/stack-check-prologue-19.c: Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * gcc.target/aarch64/stack-check-prologue-18.c: New test.
++ * gcc.target/aarch64/stack-check-prologue-19.c: Likewise.
++ * gcc.target/aarch64/stack-check-prologue-20.c: Likewise.
++
++2023-09-12 Richard Sandiford <richard.sandiford@arm.com>
++
++ * gcc.target/aarch64/stack-check-prologue-17.c: New test.
++
++2023-09-12 Haochen Gui <guihaoc@gcc.gnu.org>
++
++ Backported from master:
++ 2023-08-31 Haochen Gui <guihaoc@gcc.gnu.org>
++
++ PR target/96762
++ * gcc.target/powerpc/pr96762.c: New.
++
++2023-09-11 liuhongt <hongtao.liu@intel.com>
++
++ Backported from master:
++ 2023-09-11 liuhongt <hongtao.liu@intel.com>
++
++ * gcc.target/i386/pr111306.c: New test.
++
++2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/110914
++ * gcc.c-torture/execute/pr110914.c: New test.
++
++2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-08-30 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/111015
++ * gcc.dg/pr111015.c: New test.
++
++2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ Backported from master:
++ 2023-08-16 liuhongt <hongtao.liu@intel.com>
++
++ * gcc.target/i386/avx2-gather-2.c: Adjust options to keep
++ gather vectorization.
++ * gcc.target/i386/avx2-gather-6.c: Ditto.
++ * gcc.target/i386/avx512f-pr88464-1.c: Ditto.
++ * gcc.target/i386/avx512f-pr88464-5.c: Ditto.
++ * gcc.target/i386/avx512vl-pr88464-1.c: Ditto.
++ * gcc.target/i386/avx512vl-pr88464-11.c: Ditto.
++ * gcc.target/i386/avx512vl-pr88464-3.c: Ditto.
++ * gcc.target/i386/avx512vl-pr88464-9.c: Ditto.
++ * gcc.target/i386/pr88531-1b.c: Ditto.
++ * gcc.target/i386/pr88531-1c.c: Ditto.
++
++2023-08-15 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110959
++ * gdc.dg/pr110959.d: New test.
++ * gdc.test/runnable/test23010.d: New test.
++
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/106310
++ * g++.dg/template/template-keyword4.C: New test.
++
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/106890
++ PR c++/109666
++ * g++.dg/cpp0x/nsdmi-array2.C: New test.
++ * g++.dg/cpp0x/nsdmi-template25.C: New test.
++
++2023-08-11 Jason Merrill <jason@redhat.com>
++
++ PR c++/108099
++ * g++.dg/ext/int128-7.C: New test.
++ * g++.dg/ext/int128-8.C: New test.
++ * g++.dg/ext/unsigned-typedef2.C: New test.
++ * g++.dg/ext/unsigned-typedef3.C: New test.
++
++2023-08-07 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-05-09 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/109761
++ * g++.dg/cpp0x/noexcept78.C: New test.
++
++2023-08-06 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2022-12-19 Jakub Jelinek <jakub@redhat.com>
++
++ PR fortran/107397
++ * gfortran.dg/pr107397.f90: Adjust expected diagnostic wording and
++ add space between dg-error string and closing }.
++
++2023-08-06 Steve Kargl <kargl@gcc.gnu.org>
++
++ Backported from master:
++ 2022-12-18 Steve Kargl <kargl@gcc.gnu.org>
++
++ PR fortran/107397
++ * gfortran.dg/pr107397.f90: New test.
++
++2023-08-01 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-07-26 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/110741
++ * g++.target/powerpc/pr110741.C: New test.
++
++2023-07-20 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-17 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/95947
++ PR fortran/110658
++ * gfortran.dg/deferred_character_37.f90: New test.
++
++2023-07-19 Maciej W. Rozycki <macro@embecosm.com>
++
++ Backported from master:
++ 2023-07-11 Maciej W. Rozycki <macro@embecosm.com>
++
++ * gcc.dg/vect/pr97428.c: Limit to `vect_double' targets.
++
++2023-07-14 Uros Bizjak <ubizjak@gmail.com>
++
++ Backported from master:
++ 2023-07-14 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/110206
++ * gcc.target/i386/pr110206.c: New test.
++
++2023-07-14 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-11 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/110288
++ * gfortran.dg/findloc_10.f90: New test.
++
++2023-07-13 Uros Bizjak <ubizjak@gmail.com>
++
++ Backported from master:
++ 2023-07-13 Uros Bizjak <ubizjak@gmail.com>
++
++ PR target/106966
++ * gcc.target/alpha/pr106966.c: New test.
++
++2023-07-12 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-06-29 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/110468
++ * g++.dg/cpp0x/noexcept79.C: New test.
++
++2023-07-08 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-07-08 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/110585
++ * gfortran.dg/findloc_9.f90: New test.
++
++2023-07-07 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-07 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/108842
++ * gdc.dg/pr98277.d: Add more tests.
++ * gdc.dg/pr108842.d: New test.
++
++2023-07-05 Michael Meissner <meissner@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-23 Michael Meissner <meissner@linux.ibm.com>
++ Aaron Sawdey <acsawdey@linux.ibm.com>
++
++ PR target/105325
++ * g++.target/powerpc/pr105325.C: New test.
++ * gcc.target/powerpc/fusion-p10-ldcmpi.c: Update insn counts.
++
++2023-07-02 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-02 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110516
++ * gdc.dg/torture/pr110516a.d: New test.
++ * gdc.dg/torture/pr110516b.d: New test.
++
++2023-07-01 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-07-01 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110514
++ * gdc.dg/pr110514a.d: New test.
++ * gdc.dg/pr110514b.d: New test.
++ * gdc.dg/pr110514c.d: New test.
++ * gdc.dg/pr110514d.d: New test.
++
++2023-06-30 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gcc.c-torture/execute/20230630-1.c: New test.
++ * gcc.c-torture/execute/20230630-2.c: Likewise.
++ * gcc.c-torture/execute/20230630-3.c: Likewise
++ * gcc.c-torture/execute/20230630-4.c: Likewise
++
++2023-06-29 liuhongt <hongtao.liu@intel.com>
++
++ * gcc.target/i386/pr110309.c: New test.
++
++2023-06-29 Hongyu Wang <hongyu.wang@intel.com>
++
++ Backported from master:
++ 2023-06-26 Hongyu Wang <hongyu.wang@intel.com>
++
++ * gcc.target/i386/mvc17.c: New test.
++
++2023-06-28 liuhongt <hongtao.liu@intel.com>
++
++ * gcc.target/i386/avx-vzeroupper-29.c: New testcase.
++ * gcc.target/i386/avx-vzeroupper-12.c: Adjust testcase.
++ * gcc.target/i386/avx-vzeroupper-7.c: Ditto.
++ * gcc.target/i386/avx-vzeroupper-9.c: Ditto.
++
++2023-06-27 Andrew Pinski <apinski@marvell.com>
++
++ Backported from master:
++ 2023-06-27 Andrew Pinski <apinski@marvell.com>
++
++ PR middle-end/110420
++ PR middle-end/103979
++ PR middle-end/98619
++ * gcc.c-torture/compile/asmgoto-6.c: New test.
++
++2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110359
++ * gdc.dg/pr110359.d: New test.
++
++2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ Backported from master:
++ 2023-06-26 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ PR d/110113
++ * gdc.test/compilable/test23978.d: New test.
++
++2023-06-23 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-06-19 Richard Biener <rguenther@suse.de>
++
++ PR tree-optimization/110298
++ * gcc.dg/torture/pr110298.c: New testcase.
++
++2023-06-22 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-06-07 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/110132
++ * lib/target-supports.exp (check_effective_target_aarch64_asm_FUNC_ok):
++ Extend to ls64.
++ * g++.target/aarch64/acle/acle.exp: New.
++ * g++.target/aarch64/acle/ls64.C: New test.
++ * g++.target/aarch64/acle/ls64_lto.C: New test.
++ * gcc.target/aarch64/acle/ls64_lto.c: New test.
++ * gcc.target/aarch64/acle/pr110132.c: New test.
++
++2023-06-22 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-06-07 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/110100
++ * gcc.target/aarch64/acle/pr110100.c: New test.
++
++2023-06-20 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-13 Kewen Lin <linkw@linux.ibm.com>
++
++ PR testsuite/110230
++ PR target/109932
++ * gcc.target/powerpc/pr109932-1.c: Adjust with int128 effective target.
++ * gcc.target/powerpc/pr109932-2.c: Ditto.
++
++2023-06-20 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-12 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/109932
++ * gcc.target/powerpc/pr109932-1.c: New test.
++ * gcc.target/powerpc/pr109932-2.c: New test.
++
++2023-06-20 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-06-12 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/110011
++ * gcc.target/powerpc/pr110011.c: New test.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ * gcc.target/aarch64/shrink_wrap_1.c (dg-options): Add
++ -fno-stack-protector.
++ * gcc.target/aarch64/stack-check-cfa-1.c (dg-options): Add
++ -fno-stack-protector.
++ * gcc.target/aarch64/stack-check-cfa-2.c (dg-options): Add
++ -fno-stack-protector.
++ * gcc.target/aarch64/test_frame_17.c (dg-options): Add
++ -fno-stack-protector.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ * gcc.target/aarch64/pr104005.c (dg-options): Add
++ -fno-stack-protector.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ * gcc.target/aarch64/auto-init-7.c (dg-options): Add
++ -fno-stack-protector.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ * gcc.target/aarch64/pr103147-10.c (dg-options): Add
++ -fno-stack-protector.
++ * g++.target/aarch64/pr103147-10.C: Likewise.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ * gcc.target/aarch64/sve/pcs/aarch64-sve-pcs.exp (sve_flags):
++ Add -fno-stack-protector.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ PR testsuite/70150
++ * gcc.target/aarch64/fuse_adrp_add_1.c (dg-options): Add
++ -fno-pie.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ PR testsuite/70150
++ * gcc.dg/tls/pr78796.c (dg-additional-options): Add -fno-pie
++ -no-pie for aarch64-*-*.
++ * gcc.target/aarch64/pr63304_1.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/pr70120-2.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/pr78733.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/pr79041-2.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/pr94530.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/pr94577.c (dg-options): Add -fno-pie.
++ * gcc.target/aarch64/reload-valid-spoff.c (dg-options): Add
++ -fno-pie.
++
++2023-06-15 Xi Ruoyao <xry111@xry111.site>
++
++ Backported from master:
++ 2023-03-07 Xi Ruoyao <xry111@xry111.site>
++
++ PR testsuite/70150
++ * gcc.target/aarch64/aapcs64/aapcs64.exp (additional_flags):
++ Add -fno-pie -no-pie.
++
++2023-06-10 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/109650
++ Backport from 2023-05-10 master r14-1688.
++ * gcc.target/avr/torture/pr109650-1.c: New test.
++ * gcc.target/avr/torture/pr109650-2.c: New test.
++
++2023-06-09 Iain Sandoe <iain@sandoe.co.uk>
++
++ Backported from master:
++ 2023-06-02 Iain Sandoe <iain@sandoe.co.uk>
++
++ PR target/110044
++ * gcc.target/powerpc/darwin-abi-13-0.c: New test.
++ * gcc.target/powerpc/darwin-abi-13-1.c: New test.
++ * gcc.target/powerpc/darwin-abi-13-2.c: New test.
++ * gcc.target/powerpc/darwin-structs-0.h: New test.
++
++2023-06-09 liuhongt <hongtao.liu@intel.com>
++
++ * gcc.target/i386/pr110108-2.c: New test.
++
++2023-06-08 Alex Coplan <alex.coplan@arm.com>
++
++ Backported from master:
++ 2023-05-25 Alex Coplan <alex.coplan@arm.com>
++
++ PR target/109800
++ * gcc.target/arm/pure-code/pr109800.c: New test.
++
++2023-06-08 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
++
++ Backported from master:
++ 2023-05-24 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
++
++ PR target/109939
++ * gcc.target/arm/pr109939.c: New test.
++
++2023-06-04 Steve Kargl <kargl@gcc.gnu.org>
++
++ Backported from master:
++ 2023-06-02 Steve Kargl <kargl@gcc.gnu.org>
++
++ PR fortran/100607
++ * gfortran.dg/select_rank_6.f90: New test.
++
++2023-05-30 Christophe Lyon <christophe.lyon@linaro.org>
++
++ Backported from master:
++ 2023-05-30 Christophe Lyon <christophe.lyon@linaro.org>
++
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-int.c:
++ Support both definitions of int32_t.
++
++2023-05-25 Georg-Johann Lay <avr@gjlay.de>
++
++ PR target/82931
++ * gcc.target/avr/pr82931.c: New test.
++
++2023-05-22 Michael Meissner <meissner@linux.ibm.com>
++
++ PR target/70243
++ * gcc.target/powerpc/pr70243.c: New test. Back port from master
++ 04/10/2023 change.
++
++2023-05-22 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-21 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/109505
++ * gcc.target/aarch64/sve/pr109505.c: New test.
++
++2023-05-20 Harald Anlauf <anlauf@gmx.de>
++
++ Backported from master:
++ 2023-05-15 Harald Anlauf <anlauf@gmx.de>
++
++ PR fortran/109846
++ * gfortran.dg/ptr-func-5.f90: New test.
++
++2023-05-20 Triffid Hunter <triffid.hunter@gmail.com>
++
++ PR target/105753
++ Backport from 2023-05-20 https://gcc.gnu.org/r14-1016
++ * gcc.target/avr/torture/pr105753.c: New test.
++
++2023-05-19 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-05-15 Patrick Palka <ppalka@redhat.com>
++
++ * g++.dg/cpp23/feat-cxx2b.C: Test __cpp_auto_cast.
++
++2023-05-18 Alexandre Oliva <oliva@adacore.com>
++
++ * gcc.target/arm/acle/cde-mve-full-assembly.c: Drop blank
++ after tab after vmsr, and lower the case of P0.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/srshr.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/srshrl.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/uqshl.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/uqshll.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/urshr.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/urshrl.c: Update shift value.
++ * gcc.target/arm/mve/intrinsics/vadciq_m_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadciq_m_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadciq_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadciq_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadcq_m_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadcq_m_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadcq_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vadcq_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbciq_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbciq_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbcq_s32.c: Update to ubfx.
++ * gcc.target/arm/mve/intrinsics/vsbcq_u32.c: Update to ubfx.
++ * gcc.target/arm/mve/mve_const_shifts.c: New test.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/109697
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c: XFAIL check.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c: XFAIL check.
++ * gcc.target/arm/mve/pr108177-1.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-10.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-11.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-12.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-13.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-13-run.c: use mve_fp
++ * gcc.target/arm/mve/pr108177-14.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-14-run.c: use mve_fp
++ * gcc.target/arm/mve/pr108177-2.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-3.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-4.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-5.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-6.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-7.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-8.c: Relax registers.
++ * gcc.target/arm/mve/pr108177-9.c: Relax registers.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/mve_fp_vaddq_n.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vaddq_m.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vaddq_n.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u8.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_s64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_u64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_z_s64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_z_u64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_s64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_u64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_z_s64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_z_u64.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_f16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_s16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_f16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_s16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_f16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_s16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_f16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_s16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_u16.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_f32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_f32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_f32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_f32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_s32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_u32.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vstore_scatter_shifted_offset.c: Removed.
++ * gcc.target/arm/mve/intrinsics/mve_vstore_scatter_shifted_offset_p.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmaq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmaq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmasq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vfmasq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f32-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f16-1.c: Removed.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f32-1.c: Removed.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-fp.c: Add testcases.
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-int.c: Add testcases.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/mve_vadcq_vsbcq_fpscr_overwrite.c: New.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/asrl.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/lsll.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/sqrshr.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/sqrshrl_sat48.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/sqshl.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/sqshll.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/srshr.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/srshrl.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/uqrshl.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/uqrshll_sat48.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/uqshl.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/uqshll.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/urshr.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/urshrl.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadciq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadciq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadciq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadciq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadcq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadcq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadcq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vadcq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vandq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbicq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp16q.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp16q_m.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp32q.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp32q_m.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp64q.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp64q_m.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp8q.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vctp8q_m.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtbq_f16_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtbq_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtbq_m_f16_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtbq_m_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_m_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_m_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_m_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_m_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_m_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_m_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_m_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_m_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_m_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_m_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_m_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_m_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_m_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_n_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvttq_f16_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvttq_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvttq_m_f16_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvttq_m_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/veorq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmsq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmsq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmsq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmsq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlsldavxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovnbq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmovntq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vornq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vorrq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpnot.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vpselq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovnbq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovntq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovunbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovunbq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovunbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovunbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovuntq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovuntq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovuntq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqmovuntq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshlq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrntq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshruntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrshruntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshlq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrnbq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrntq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrunbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshrunbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshruntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqshruntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlsldavhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrnbq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrntq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbciq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbciq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbcq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsbcq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlcq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_r_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshlq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrnbq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrntq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsliq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsriq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst1q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vstrwq_f32.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vstrwq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrwq_u32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vld1q_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vld1q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld1q_z_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld4q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrbq_z_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_z_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_z_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_z_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrhq_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_z_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_z_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_z_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst2q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vst4q_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrbq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vstrhq_u32.c: Likewise.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-04-06 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-fp.c: Remove unused variables.
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-int.c: Remove unused variables.
++
++2023-05-18 Murray Steele <murray.steele@arm.com>
++
++ Backported from master:
++ 2023-01-18 Murray Steele <murray.steele@arm.com>
++
++ * gcc.target/arm/mve/general/preserve_user_namespace_1.c: New test.
++
++2023-05-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
++
++ Backported from master:
++ 2023-01-24 Andre Vieira <andre.simoesdiasvieira@arm.com>
++
++ * gcc.target/arm/mve/pr108177-1-run.c: New test.
++ * gcc.target/arm/mve/pr108177-1.c: New test.
++ * gcc.target/arm/mve/pr108177-10-run.c: New test.
++ * gcc.target/arm/mve/pr108177-10.c: New test.
++ * gcc.target/arm/mve/pr108177-11-run.c: New test.
++ * gcc.target/arm/mve/pr108177-11.c: New test.
++ * gcc.target/arm/mve/pr108177-12-run.c: New test.
++ * gcc.target/arm/mve/pr108177-12.c: New test.
++ * gcc.target/arm/mve/pr108177-13-run.c: New test.
++ * gcc.target/arm/mve/pr108177-13.c: New test.
++ * gcc.target/arm/mve/pr108177-14-run.c: New test.
++ * gcc.target/arm/mve/pr108177-14.c: New test.
++ * gcc.target/arm/mve/pr108177-2-run.c: New test.
++ * gcc.target/arm/mve/pr108177-2.c: New test.
++ * gcc.target/arm/mve/pr108177-3-run.c: New test.
++ * gcc.target/arm/mve/pr108177-3.c: New test.
++ * gcc.target/arm/mve/pr108177-4-run.c: New test.
++ * gcc.target/arm/mve/pr108177-4.c: New test.
++ * gcc.target/arm/mve/pr108177-5-run.c: New test.
++ * gcc.target/arm/mve/pr108177-5.c: New test.
++ * gcc.target/arm/mve/pr108177-6-run.c: New test.
++ * gcc.target/arm/mve/pr108177-6.c: New test.
++ * gcc.target/arm/mve/pr108177-7-run.c: New test.
++ * gcc.target/arm/mve/pr108177-7.c: New test.
++ * gcc.target/arm/mve/pr108177-8-run.c: New test.
++ * gcc.target/arm/mve/pr108177-8.c: New test.
++ * gcc.target/arm/mve/pr108177-9-run.c: New test.
++ * gcc.target/arm/mve/pr108177-9.c: New test.
++ * gcc.target/arm/mve/pr108177-main.x: New test include.
++ * gcc.target/arm/mve/pr108177.x: New test include.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-04-04 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcreateq_f16.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_f32.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s16.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s32.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s64.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s8.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u16.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u32.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u64.c: Tighten test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u8.c: Tighten test.
++
++2023-05-18 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ Backported from master:
++ 2023-01-16 Stam Markianos-Wright <stam.markianos-wright@arm.com>
++
++ PR target/96795
++ PR target/107515
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-fp.c: New test.
++ * gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-int.c: New test.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c: Add missing extern
++ "C".
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vld2q_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vld2q_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vld2q_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqnegq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqnegq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqnegq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqabsq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqabsq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqabsq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcmulq_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vcmulq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcmlaq_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_p8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_p8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vnegq_f16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vnegq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vnegq_x_s8.c: Likewise.
++ * gcc.target/arm/simd/mve-vneg.c: Update test.
++ * gcc.target/arm/simd/mve-vshr.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vclzq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use
++ extern "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vclzq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclzq_x_u8.c: Likewise.
++ * gcc.target/arm/simd/mve-vclz.c: Update test.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2023-01-25 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vclsq_m_s16.c: Use
++ check-function-bodies instead of scan-assembler checks. Use extern
++ "C" for C++ testing.
++ * gcc.target/arm/mve/intrinsics/vclsq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vclsq_x_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-12-08 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s16.c: Declare functions
++ as extern "C".
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u8.c: Likewise.
++
++2023-05-18 Christophe Lyon <christophe.lyon@arm.com>
++
++ Backported from master:
++ 2022-09-30 Christophe Lyon <christophe.lyon@arm.com>
++
++ * gcc.target/arm/mve/mve_load_memory_modes.c: Update expected
++ registers.
++ * gcc.target/arm/mve/mve_store_memory_modes.c: Likewise.
++
++2023-05-18 Christophe Lyon <christophe.lyon@arm.com>
++
++ Backported from master:
++ 2022-10-03 Christophe Lyon <christophe.lyon@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vrev64q_m_s16-clobber.c: New test.
++
++2023-05-18 Christophe Lyon <christophe.lyon@arm.com>
++
++ Backported from master:
++ 2022-12-01 Christophe Lyon <christophe.lyon@arm.com>
++
++ * gcc.target/arm/simd/mve-compare-1.c: Update.
++ * gcc.target/arm/simd/mve-compare-scalar-1.c: Update.
++ * gcc.target/arm/simd/mve-vabs.c: Update.
++ * gcc.target/arm/simd/mve-vadd-1.c: Update.
++ * gcc.target/arm/simd/mve-vadd-scalar-1.c: Update.
++ * gcc.target/arm/simd/mve-vcmp.c: Update.
++ * gcc.target/arm/simd/pr101325.c: Update.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsetq_lane_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqsubq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmullbq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmulltq_s32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqdmlashq_n_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vqaddq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmlaldavaxq_s32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vsubq_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vsubq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vsubq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmulq_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vmulq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmulq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_p_s32.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddlvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvaq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vaddvq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vabsq_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vabsq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabsq_x_s8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vabdq_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vabdq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabdq_x_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vabavq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vmaxvq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminaq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminavq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmaq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmavq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminnmvq_p_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminq_x_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_p_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vminvq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmphiq_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpleq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpltq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcmpneq_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u16.c: Improve tests.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_m_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_wb_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c: Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_f32.c:
++ Update test.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_s32.c:
++ Likewise.
++ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_u32.c:
++ Likewise.
++
++2023-05-18 Andrea Corallo <andrea.corallo@arm.com>
++
++ Backported from master:
++ 2022-11-28 Andrea Corallo <andrea.corallo@arm.com>
++
++ * gcc.target/arm/mve/intrinsics/vcreateq_f16.c: Improve test.
++ * gcc.target/arm/mve/intrinsics/vcreateq_f32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_s8.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u16.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u32.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u64.c: Likewise.
++ * gcc.target/arm/mve/intrinsics/vcreateq_u8.c: Likewise.
++
++2023-05-17 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-17 Jakub Jelinek <jakub@redhat.com>
++
++ PR c++/109868
++ * g++.dg/init/pr109868.C: New test.
++
++2023-05-15 Jason Merrill <jason@redhat.com>
++
++ PR c++/109241
++ * g++.dg/cpp1y/lambda-generic-local-class2.C: New test.
++
++2023-05-15 Richard Biener <rguenther@suse.de>
++
++ PR testsuite/108776
++ * c-c++-common/rotate-11.c: Add --param logical-op-non-short-circuit=1.
++
++2023-05-15 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-05-15 Richard Biener <rguenther@suse.de>
++
++ * gcc.dg/vect/pr108950.c: Re-order dg-require-effective-target
++ and dg-do.
++
++2023-05-10 Richard Biener <rguenther@suse.de>
++
++ Backported from master:
++ 2023-05-10 Richard Biener <rguenther@suse.de>
++
++ * g++.dg/torture/pr106922.C: Force _GLIBCXX_USE_CXX11_ABI to 1.
++
++2023-05-09 Patrick Palka <ppalka@redhat.com>
++
++ Backported from master:
++ 2023-04-01 Patrick Palka <ppalka@redhat.com>
++
++ PR c++/109160
++ * g++.dg/cpp2a/concepts-placeholder12.C: New test.
++
++2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/109778
++ * gcc.dg/lto/pr109778_0.c: New test.
++ * gcc.dg/lto/pr109778_1.c: New file.
++
++2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ Backported from master:
++ 2023-05-09 Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/109778
++ * gcc.c-torture/execute/pr109778.c: New test.
++
++2023-05-09 Martin Uecker <uecker@tugraz.at>
++
++ Backported from master:
++ 2023-02-18 Martin Uecker <uecker@tugraz.at>
++
++ PR c/105660
++ * gcc.dg/pr105660-1.c: New test.
++ * gcc.dg/pr105660-2.c: New test.
++
++2023-05-09 Kewen Lin <linkw@linux.ibm.com>
++
++ Backported from master:
++ 2023-04-26 Kewen Lin <linkw@linux.ibm.com>
++
++ PR target/109069
++ * gcc.target/powerpc/pr109069-1.c: New test.
++ * gcc.target/powerpc/pr109069-2-run.c: New test.
++ * gcc.target/powerpc/pr109069-2.c: New test.
++ * gcc.target/powerpc/pr109069-2.h: New test.
++
++2023-05-09 Jason Merrill <jason@redhat.com>
++
++ PR c++/106740
++ PR c++/105852
++ * g++.dg/template/friend78.C: New test.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/gcc/testsuite/c-c++-common/rotate-11.c
++++ b/src/gcc/testsuite/c-c++-common/rotate-11.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/108440 */
+ /* { dg-do compile { target { { ilp32 || lp64 } || llp64 } } } */
+-/* { dg-options "-O2 -fdump-tree-optimized" } */
++/* { dg-options "-O2 -fdump-tree-optimized --param logical-op-non-short-circuit=1" } */
+ /* { dg-final { scan-tree-dump-times " r<< " 5 "optimized" } } */
+ /* { dg-final { scan-tree-dump-times " \\\& 7;" 4 "optimized" } } */
+
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp0x/noexcept78.C
+@@ -0,0 +1,16 @@
++// PR c++/109761
++// { dg-do compile { target c++11 } }
++
++struct base {
++ virtual void foo() noexcept { }
++ virtual ~base() { }
++};
++
++struct outer : base {
++ struct nested {
++ void foo() noexcept(noexcept(g())); // { dg-bogus "looser" }
++ ~nested() noexcept(noexcept(g())); // { dg-bogus "looser" }
++ };
++ static void g();
++};
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp0x/noexcept79.C
+@@ -0,0 +1,18 @@
++// PR c++/110468
++// { dg-do compile { target c++11 } }
++
++template<int T>
++struct variant {
++ variant() noexcept(T > 0);
++};
++
++template<int N>
++struct A {
++ variant<N> m = {};
++};
++
++struct B {
++ B(A<1>);
++};
++
++B b = {{}};
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp0x/nsdmi-array2.C
+@@ -0,0 +1,15 @@
++// PR c++/109666
++// { dg-do compile { target c++11 } }
++
++struct Point {
++ int value_;
++};
++template <int n> struct StaticVector {
++ static StaticVector create() {
++ StaticVector output;
++ return output;
++ }
++ Point _M_elems[n]{};
++
++};
++void f() { StaticVector<3>::create(); }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp0x/nsdmi-template25.C
+@@ -0,0 +1,18 @@
++// PR c++/106890
++// { dg-do compile { target c++11 } }
++
++struct A
++{
++ int p;
++};
++
++template<typename T>
++struct B : virtual public A
++{
++ B() { }
++ B(int) { }
++
++ int k = this->p;
++};
++
++template struct B<int>;
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp1y/lambda-generic-local-class2.C
+@@ -0,0 +1,13 @@
++// PR c++/109241
++// { dg-do compile { target c++14 } }
++// { dg-options "" } no pedantic
++
++void g() {
++ [](auto) {
++ [](auto) {
++ ({
++ struct A {};
++ });
++ };
++ }(1);
++}
+--- a/src/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C
++++ b/src/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C
+@@ -557,3 +557,9 @@
+ #elif __cpp_multidimensional_subscript != 202110
+ # error "__cpp_multidimensional_subscript != 202110"
+ #endif
++
++#ifndef __cpp_auto_cast
++# error "__cpp_auto_cast"
++#elif __cpp_auto_cast != 202110
++# error "__cpp_auto_cast != 202110"
++#endif
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/cpp2a/concepts-placeholder12.C
+@@ -0,0 +1,29 @@
++// PR c++/109160
++// { dg-do compile { target c++20 } }
++
++template<class T, bool B>
++concept C = B;
++
++template<int> struct X { };
++
++template<bool B>
++struct A {
++ template<C<B> auto V> static void f();
++ template<C<B> auto V> static void g(X<V>);
++ template<C<B> auto V> static inline int value;
++ template<C<B> auto V> struct D { };
++};
++
++int main() {
++ A<true>::f<0>();
++ A<false>::f<0>(); // { dg-error "no match|constraints" }
++
++ A<true>::g(X<0>{});
++ A<false>::g(X<0>{}); // { dg-error "no match|constraints" }
++
++ bool v1 = A<true>::value<0>;
++ bool v2 = A<false>::value<0>; // { dg-error "constraints" }
++
++ A<true>::D<0> d1;
++ A<false>::D<0> d2; // { dg-error "constraints" }
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/ext/int128-7.C
+@@ -0,0 +1,4 @@
++// PR c++/108099
++// { dg-do compile { target { c++11 && int128 } } }
++
++using i128 = signed __int128_t; // { dg-error "specified with" }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/ext/int128-8.C
+@@ -0,0 +1,24 @@
++// PR c++/108099
++// { dg-do compile { target { c++11 && int128 } } }
++// { dg-options "" }
++
++using u128 = unsigned __int128_t;
++using s128 = signed __int128_t;
++template <typename T, T v> struct integral_constant {
++ static constexpr T value = v;
++};
++typedef integral_constant <bool, false> false_type;
++typedef integral_constant <bool, true> true_type;
++template <class T, class U>
++struct is_same : false_type {};
++template <class T>
++struct is_same <T, T> : true_type {};
++static_assert (is_same <__int128, s128>::value, "");
++static_assert (is_same <signed __int128, s128>::value, "");
++static_assert (is_same <__int128_t, s128>::value, "");
++static_assert (is_same <unsigned __int128, u128>::value, "");
++static_assert (is_same <__uint128_t, u128>::value, "");
++static_assert (sizeof (s128) == sizeof (__int128), "");
++static_assert (sizeof (u128) == sizeof (unsigned __int128), "");
++static_assert (s128(-1) < 0, "");
++static_assert (u128(-1) > 0, "");
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/ext/unsigned-typedef2.C
+@@ -0,0 +1,25 @@
++// PR c++/108099
++// { dg-do compile { target c++11 } }
++// { dg-options "" }
++
++typedef long long t64;
++template <typename T, T v> struct integral_constant {
++ static constexpr T value = v;
++};
++typedef integral_constant <bool, false> false_type;
++typedef integral_constant <bool, true> true_type;
++template <class T, class U>
++struct is_same : false_type {};
++template <class T>
++struct is_same <T, T> : true_type {};
++
++using s64 = signed t64;
++static_assert (is_same <long long, s64>::value, "");
++static_assert (is_same <signed long long, s64>::value, "");
++static_assert (sizeof (s64) == sizeof (long long), "");
++static_assert (s64(-1) < 0, "");
++
++using u64 = unsigned t64;
++static_assert (is_same <unsigned long long, u64>::value, "");
++static_assert (sizeof (u64) == sizeof (unsigned long long), "");
++static_assert (u64(-1) > 0, "");
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/ext/unsigned-typedef3.C
+@@ -0,0 +1,25 @@
++// PR c++/108099
++// { dg-do compile { target c++11 } }
++// { dg-options "" }
++
++typedef unsigned long long t64;
++template <typename T, T v> struct integral_constant {
++ static constexpr T value = v;
++};
++typedef integral_constant <bool, false> false_type;
++typedef integral_constant <bool, true> true_type;
++template <class T, class U>
++struct is_same : false_type {};
++template <class T>
++struct is_same <T, T> : true_type {};
++
++using s64 = signed t64;
++static_assert (is_same <long long, s64>::value, "");
++static_assert (is_same <signed long long, s64>::value, "");
++static_assert (sizeof (s64) == sizeof (long long), "");
++static_assert (s64(-1) < 0, "");
++
++using u64 = unsigned t64;
++static_assert (is_same <unsigned long long, u64>::value, "");
++static_assert (sizeof (u64) == sizeof (unsigned long long), "");
++static_assert (u64(-1) > 0, "");
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/init/pr109868.C
+@@ -0,0 +1,13 @@
++// PR c++/109868
++// { dg-do compile }
++// { dg-options "-O2" }
++
++struct A { virtual void foo (); };
++struct B { long b; int : 0; };
++struct C : A { B c; };
++
++void
++bar (C *p)
++{
++ *p = C ();
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/template/friend78.C
+@@ -0,0 +1,18 @@
++// PR c++/106740
++// { dg-additional-options -Wno-non-template-friend }
++
++template <typename> struct EnumClass { friend int toString(EnumClass); };
++struct AmhsConvInfoCoFw {
++ enum AftnTypeXMsgTypeEnum {};
++ typedef EnumClass<AftnTypeXMsgTypeEnum> AftnTypeXMsgType;
++ const int getAftnTypeXMsgTypeAsStr() const;
++ struct MtcuAxgwInfo {
++ AftnTypeXMsgType mAftnTypeXMsgType;
++ };
++};
++const int AmhsConvInfoCoFw::getAftnTypeXMsgTypeAsStr() const {
++ MtcuAxgwInfo __trans_tmp_1;
++ toString(__trans_tmp_1.mAftnTypeXMsgType);
++ return 0;
++}
++int toString(AmhsConvInfoCoFw::AftnTypeXMsgType);
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.dg/template/template-keyword4.C
+@@ -0,0 +1,18 @@
++// PR c++/106310
++
++template <class T>
++struct set{};
++
++template< typename T >
++struct Base
++{
++ template< int > int set(T const &);
++};
++
++template< typename T >
++struct Derived : Base< T >
++{
++ void f(T const &arg) {
++ this->template set< 0 >(arg);
++ }
++};
+--- a/src/gcc/testsuite/g++.dg/torture/pr106922.C
++++ b/src/gcc/testsuite/g++.dg/torture/pr106922.C
+@@ -4,8 +4,16 @@
+ // -O1 doesn't iterate VN and thus has bogus uninit diagnostics
+ // { dg-skip-if "" { *-*-* } { "-O1" } { "" } }
+
++// The testcase still emits bogus diagnostics with the pre-C++11 ABI
++#undef _GLIBCXX_USE_CXX11_ABI
++#define _GLIBCXX_USE_CXX11_ABI 1
++
+ #include <vector>
+
++// When the library is not dual-ABI and defaults to the old ABI, just
++// compile an empty TU.
++#if _GLIBCXX_USE_CXX11_ABI
++
+ #include <optional>
+ template <class T>
+ using Optional = std::optional<T>;
+@@ -46,3 +54,4 @@ void test()
+ externals.external2 = internal2;
+ }
+ }
++#endif
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.target/aarch64/acle/acle.exp
+@@ -0,0 +1,35 @@
++# Copyright (C) 2014-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3. If not see
++# <http://www.gnu.org/licenses/>.
++
++# GCC testsuite that uses the `dg.exp' driver.
++
++# Exit immediately if this isn't an AArch64 target.
++if ![istarget aarch64*-*-*] then {
++ return
++}
++
++# Load support procs.
++load_lib g++-dg.exp
++
++# Initialize `dg'.
++dg-init
++
++# Main loop.
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
++ "" ""
++
++# All done.
++dg-finish
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.target/aarch64/acle/ls64.C
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-march=armv8.7-a" } */
++#include <arm_acle.h>
++int main()
++{
++ data512_t d = __arm_ld64b ((const void *)0x1000);
++ __arm_st64b ((void *)0x2000, d);
++ uint64_t x = __arm_st64bv ((void *)0x3000, d);
++ x += __arm_st64bv0 ((void *)0x4000, d);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.target/aarch64/acle/ls64_lto.C
+@@ -0,0 +1,10 @@
++/* { dg-do link { target aarch64_asm_ls64_ok } } */
++/* { dg-additional-options "-march=armv8.7-a -flto" } */
++#include <arm_acle.h>
++int main()
++{
++ data512_t d = __arm_ld64b ((const void *)0x1000);
++ __arm_st64b ((void *)0x2000, d);
++ uint64_t x = __arm_st64bv ((void *)0x3000, d);
++ x += __arm_st64bv0 ((void *)0x4000, d);
++}
+--- a/src/gcc/testsuite/g++.target/aarch64/pr103147-10.C
++++ b/src/gcc/testsuite/g++.target/aarch64/pr103147-10.C
+@@ -1,4 +1,4 @@
+-/* { dg-options "-O2 -fpack-struct -mstrict-align" } */
++/* { dg-options "-O2 -fpack-struct -mstrict-align -fno-stack-protector" } */
+ /* { dg-final { check-function-bodies "**" "" "" } } */
+
+ #include <arm_neon.h>
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.target/powerpc/pr105325.C
+@@ -0,0 +1,28 @@
++/* { dg-do assemble } */
++/* { dg-require-effective-target lp64 } */
++/* { dg-require-effective-target power10_ok } */
++/* { dg-require-effective-target powerpc_prefixed_addr } */
++/* { dg-options "-O2 -mdejagnu-cpu=power10 -fstack-protector" } */
++
++/* PR target/105324. Test that power10 fusion does not generate an LWA/CMPDI
++ with a large offset that the assembler rejects. Instead it should
++ generate a PLWZ/CMPWI combination.
++
++ Originally, the code was dying because the fusion load + compare -1/0/1
++ patterns did not handle the possibility that the load might be prefixed.
++ The -fstack-protector option is needed to show the bug. */
++
++struct Ath__array1D {
++ int _current;
++ int getCnt() { return _current; }
++};
++struct extMeasure {
++ int _mapTable[10000];
++ Ath__array1D _metRCTable;
++};
++void measureRC() {
++ extMeasure m;
++ for (; m._metRCTable.getCnt();)
++ for (;;)
++ ;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/g++.target/powerpc/pr110741.C
+@@ -0,0 +1,552 @@
++/* { dg-do run { target { power10_hw } } } */
++/* { dg-options "-O2 -mdejagnu-cpu=power10" } */
++
++#include <altivec.h>
++
++typedef unsigned char uint8_t;
++
++template <uint8_t kTernLogOp>
++static inline vector unsigned long long
++VSXTernaryLogic (vector unsigned long long a, vector unsigned long long b,
++ vector unsigned long long c)
++{
++ return vec_ternarylogic (a, b, c, kTernLogOp);
++}
++
++static vector unsigned long long
++VSXTernaryLogic (vector unsigned long long a, vector unsigned long long b,
++ vector unsigned long long c, int ternary_logic_op)
++{
++ switch (ternary_logic_op & 0xFF)
++ {
++ case 0:
++ return VSXTernaryLogic<0> (a, b, c);
++ case 1:
++ return VSXTernaryLogic<1> (a, b, c);
++ case 2:
++ return VSXTernaryLogic<2> (a, b, c);
++ case 3:
++ return VSXTernaryLogic<3> (a, b, c);
++ case 4:
++ return VSXTernaryLogic<4> (a, b, c);
++ case 5:
++ return VSXTernaryLogic<5> (a, b, c);
++ case 6:
++ return VSXTernaryLogic<6> (a, b, c);
++ case 7:
++ return VSXTernaryLogic<7> (a, b, c);
++ case 8:
++ return VSXTernaryLogic<8> (a, b, c);
++ case 9:
++ return VSXTernaryLogic<9> (a, b, c);
++ case 10:
++ return VSXTernaryLogic<10> (a, b, c);
++ case 11:
++ return VSXTernaryLogic<11> (a, b, c);
++ case 12:
++ return VSXTernaryLogic<12> (a, b, c);
++ case 13:
++ return VSXTernaryLogic<13> (a, b, c);
++ case 14:
++ return VSXTernaryLogic<14> (a, b, c);
++ case 15:
++ return VSXTernaryLogic<15> (a, b, c);
++ case 16:
++ return VSXTernaryLogic<16> (a, b, c);
++ case 17:
++ return VSXTernaryLogic<17> (a, b, c);
++ case 18:
++ return VSXTernaryLogic<18> (a, b, c);
++ case 19:
++ return VSXTernaryLogic<19> (a, b, c);
++ case 20:
++ return VSXTernaryLogic<20> (a, b, c);
++ case 21:
++ return VSXTernaryLogic<21> (a, b, c);
++ case 22:
++ return VSXTernaryLogic<22> (a, b, c);
++ case 23:
++ return VSXTernaryLogic<23> (a, b, c);
++ case 24:
++ return VSXTernaryLogic<24> (a, b, c);
++ case 25:
++ return VSXTernaryLogic<25> (a, b, c);
++ case 26:
++ return VSXTernaryLogic<26> (a, b, c);
++ case 27:
++ return VSXTernaryLogic<27> (a, b, c);
++ case 28:
++ return VSXTernaryLogic<28> (a, b, c);
++ case 29:
++ return VSXTernaryLogic<29> (a, b, c);
++ case 30:
++ return VSXTernaryLogic<30> (a, b, c);
++ case 31:
++ return VSXTernaryLogic<31> (a, b, c);
++ case 32:
++ return VSXTernaryLogic<32> (a, b, c);
++ case 33:
++ return VSXTernaryLogic<33> (a, b, c);
++ case 34:
++ return VSXTernaryLogic<34> (a, b, c);
++ case 35:
++ return VSXTernaryLogic<35> (a, b, c);
++ case 36:
++ return VSXTernaryLogic<36> (a, b, c);
++ case 37:
++ return VSXTernaryLogic<37> (a, b, c);
++ case 38:
++ return VSXTernaryLogic<38> (a, b, c);
++ case 39:
++ return VSXTernaryLogic<39> (a, b, c);
++ case 40:
++ return VSXTernaryLogic<40> (a, b, c);
++ case 41:
++ return VSXTernaryLogic<41> (a, b, c);
++ case 42:
++ return VSXTernaryLogic<42> (a, b, c);
++ case 43:
++ return VSXTernaryLogic<43> (a, b, c);
++ case 44:
++ return VSXTernaryLogic<44> (a, b, c);
++ case 45:
++ return VSXTernaryLogic<45> (a, b, c);
++ case 46:
++ return VSXTernaryLogic<46> (a, b, c);
++ case 47:
++ return VSXTernaryLogic<47> (a, b, c);
++ case 48:
++ return VSXTernaryLogic<48> (a, b, c);
++ case 49:
++ return VSXTernaryLogic<49> (a, b, c);
++ case 50:
++ return VSXTernaryLogic<50> (a, b, c);
++ case 51:
++ return VSXTernaryLogic<51> (a, b, c);
++ case 52:
++ return VSXTernaryLogic<52> (a, b, c);
++ case 53:
++ return VSXTernaryLogic<53> (a, b, c);
++ case 54:
++ return VSXTernaryLogic<54> (a, b, c);
++ case 55:
++ return VSXTernaryLogic<55> (a, b, c);
++ case 56:
++ return VSXTernaryLogic<56> (a, b, c);
++ case 57:
++ return VSXTernaryLogic<57> (a, b, c);
++ case 58:
++ return VSXTernaryLogic<58> (a, b, c);
++ case 59:
++ return VSXTernaryLogic<59> (a, b, c);
++ case 60:
++ return VSXTernaryLogic<60> (a, b, c);
++ case 61:
++ return VSXTernaryLogic<61> (a, b, c);
++ case 62:
++ return VSXTernaryLogic<62> (a, b, c);
++ case 63:
++ return VSXTernaryLogic<63> (a, b, c);
++ case 64:
++ return VSXTernaryLogic<64> (a, b, c);
++ case 65:
++ return VSXTernaryLogic<65> (a, b, c);
++ case 66:
++ return VSXTernaryLogic<66> (a, b, c);
++ case 67:
++ return VSXTernaryLogic<67> (a, b, c);
++ case 68:
++ return VSXTernaryLogic<68> (a, b, c);
++ case 69:
++ return VSXTernaryLogic<69> (a, b, c);
++ case 70:
++ return VSXTernaryLogic<70> (a, b, c);
++ case 71:
++ return VSXTernaryLogic<71> (a, b, c);
++ case 72:
++ return VSXTernaryLogic<72> (a, b, c);
++ case 73:
++ return VSXTernaryLogic<73> (a, b, c);
++ case 74:
++ return VSXTernaryLogic<74> (a, b, c);
++ case 75:
++ return VSXTernaryLogic<75> (a, b, c);
++ case 76:
++ return VSXTernaryLogic<76> (a, b, c);
++ case 77:
++ return VSXTernaryLogic<77> (a, b, c);
++ case 78:
++ return VSXTernaryLogic<78> (a, b, c);
++ case 79:
++ return VSXTernaryLogic<79> (a, b, c);
++ case 80:
++ return VSXTernaryLogic<80> (a, b, c);
++ case 81:
++ return VSXTernaryLogic<81> (a, b, c);
++ case 82:
++ return VSXTernaryLogic<82> (a, b, c);
++ case 83:
++ return VSXTernaryLogic<83> (a, b, c);
++ case 84:
++ return VSXTernaryLogic<84> (a, b, c);
++ case 85:
++ return VSXTernaryLogic<85> (a, b, c);
++ case 86:
++ return VSXTernaryLogic<86> (a, b, c);
++ case 87:
++ return VSXTernaryLogic<87> (a, b, c);
++ case 88:
++ return VSXTernaryLogic<88> (a, b, c);
++ case 89:
++ return VSXTernaryLogic<89> (a, b, c);
++ case 90:
++ return VSXTernaryLogic<90> (a, b, c);
++ case 91:
++ return VSXTernaryLogic<91> (a, b, c);
++ case 92:
++ return VSXTernaryLogic<92> (a, b, c);
++ case 93:
++ return VSXTernaryLogic<93> (a, b, c);
++ case 94:
++ return VSXTernaryLogic<94> (a, b, c);
++ case 95:
++ return VSXTernaryLogic<95> (a, b, c);
++ case 96:
++ return VSXTernaryLogic<96> (a, b, c);
++ case 97:
++ return VSXTernaryLogic<97> (a, b, c);
++ case 98:
++ return VSXTernaryLogic<98> (a, b, c);
++ case 99:
++ return VSXTernaryLogic<99> (a, b, c);
++ case 100:
++ return VSXTernaryLogic<100> (a, b, c);
++ case 101:
++ return VSXTernaryLogic<101> (a, b, c);
++ case 102:
++ return VSXTernaryLogic<102> (a, b, c);
++ case 103:
++ return VSXTernaryLogic<103> (a, b, c);
++ case 104:
++ return VSXTernaryLogic<104> (a, b, c);
++ case 105:
++ return VSXTernaryLogic<105> (a, b, c);
++ case 106:
++ return VSXTernaryLogic<106> (a, b, c);
++ case 107:
++ return VSXTernaryLogic<107> (a, b, c);
++ case 108:
++ return VSXTernaryLogic<108> (a, b, c);
++ case 109:
++ return VSXTernaryLogic<109> (a, b, c);
++ case 110:
++ return VSXTernaryLogic<110> (a, b, c);
++ case 111:
++ return VSXTernaryLogic<111> (a, b, c);
++ case 112:
++ return VSXTernaryLogic<112> (a, b, c);
++ case 113:
++ return VSXTernaryLogic<113> (a, b, c);
++ case 114:
++ return VSXTernaryLogic<114> (a, b, c);
++ case 115:
++ return VSXTernaryLogic<115> (a, b, c);
++ case 116:
++ return VSXTernaryLogic<116> (a, b, c);
++ case 117:
++ return VSXTernaryLogic<117> (a, b, c);
++ case 118:
++ return VSXTernaryLogic<118> (a, b, c);
++ case 119:
++ return VSXTernaryLogic<119> (a, b, c);
++ case 120:
++ return VSXTernaryLogic<120> (a, b, c);
++ case 121:
++ return VSXTernaryLogic<121> (a, b, c);
++ case 122:
++ return VSXTernaryLogic<122> (a, b, c);
++ case 123:
++ return VSXTernaryLogic<123> (a, b, c);
++ case 124:
++ return VSXTernaryLogic<124> (a, b, c);
++ case 125:
++ return VSXTernaryLogic<125> (a, b, c);
++ case 126:
++ return VSXTernaryLogic<126> (a, b, c);
++ case 127:
++ return VSXTernaryLogic<127> (a, b, c);
++ case 128:
++ return VSXTernaryLogic<128> (a, b, c);
++ case 129:
++ return VSXTernaryLogic<129> (a, b, c);
++ case 130:
++ return VSXTernaryLogic<130> (a, b, c);
++ case 131:
++ return VSXTernaryLogic<131> (a, b, c);
++ case 132:
++ return VSXTernaryLogic<132> (a, b, c);
++ case 133:
++ return VSXTernaryLogic<133> (a, b, c);
++ case 134:
++ return VSXTernaryLogic<134> (a, b, c);
++ case 135:
++ return VSXTernaryLogic<135> (a, b, c);
++ case 136:
++ return VSXTernaryLogic<136> (a, b, c);
++ case 137:
++ return VSXTernaryLogic<137> (a, b, c);
++ case 138:
++ return VSXTernaryLogic<138> (a, b, c);
++ case 139:
++ return VSXTernaryLogic<139> (a, b, c);
++ case 140:
++ return VSXTernaryLogic<140> (a, b, c);
++ case 141:
++ return VSXTernaryLogic<141> (a, b, c);
++ case 142:
++ return VSXTernaryLogic<142> (a, b, c);
++ case 143:
++ return VSXTernaryLogic<143> (a, b, c);
++ case 144:
++ return VSXTernaryLogic<144> (a, b, c);
++ case 145:
++ return VSXTernaryLogic<145> (a, b, c);
++ case 146:
++ return VSXTernaryLogic<146> (a, b, c);
++ case 147:
++ return VSXTernaryLogic<147> (a, b, c);
++ case 148:
++ return VSXTernaryLogic<148> (a, b, c);
++ case 149:
++ return VSXTernaryLogic<149> (a, b, c);
++ case 150:
++ return VSXTernaryLogic<150> (a, b, c);
++ case 151:
++ return VSXTernaryLogic<151> (a, b, c);
++ case 152:
++ return VSXTernaryLogic<152> (a, b, c);
++ case 153:
++ return VSXTernaryLogic<153> (a, b, c);
++ case 154:
++ return VSXTernaryLogic<154> (a, b, c);
++ case 155:
++ return VSXTernaryLogic<155> (a, b, c);
++ case 156:
++ return VSXTernaryLogic<156> (a, b, c);
++ case 157:
++ return VSXTernaryLogic<157> (a, b, c);
++ case 158:
++ return VSXTernaryLogic<158> (a, b, c);
++ case 159:
++ return VSXTernaryLogic<159> (a, b, c);
++ case 160:
++ return VSXTernaryLogic<160> (a, b, c);
++ case 161:
++ return VSXTernaryLogic<161> (a, b, c);
++ case 162:
++ return VSXTernaryLogic<162> (a, b, c);
++ case 163:
++ return VSXTernaryLogic<163> (a, b, c);
++ case 164:
++ return VSXTernaryLogic<164> (a, b, c);
++ case 165:
++ return VSXTernaryLogic<165> (a, b, c);
++ case 166:
++ return VSXTernaryLogic<166> (a, b, c);
++ case 167:
++ return VSXTernaryLogic<167> (a, b, c);
++ case 168:
++ return VSXTernaryLogic<168> (a, b, c);
++ case 169:
++ return VSXTernaryLogic<169> (a, b, c);
++ case 170:
++ return VSXTernaryLogic<170> (a, b, c);
++ case 171:
++ return VSXTernaryLogic<171> (a, b, c);
++ case 172:
++ return VSXTernaryLogic<172> (a, b, c);
++ case 173:
++ return VSXTernaryLogic<173> (a, b, c);
++ case 174:
++ return VSXTernaryLogic<174> (a, b, c);
++ case 175:
++ return VSXTernaryLogic<175> (a, b, c);
++ case 176:
++ return VSXTernaryLogic<176> (a, b, c);
++ case 177:
++ return VSXTernaryLogic<177> (a, b, c);
++ case 178:
++ return VSXTernaryLogic<178> (a, b, c);
++ case 179:
++ return VSXTernaryLogic<179> (a, b, c);
++ case 180:
++ return VSXTernaryLogic<180> (a, b, c);
++ case 181:
++ return VSXTernaryLogic<181> (a, b, c);
++ case 182:
++ return VSXTernaryLogic<182> (a, b, c);
++ case 183:
++ return VSXTernaryLogic<183> (a, b, c);
++ case 184:
++ return VSXTernaryLogic<184> (a, b, c);
++ case 185:
++ return VSXTernaryLogic<185> (a, b, c);
++ case 186:
++ return VSXTernaryLogic<186> (a, b, c);
++ case 187:
++ return VSXTernaryLogic<187> (a, b, c);
++ case 188:
++ return VSXTernaryLogic<188> (a, b, c);
++ case 189:
++ return VSXTernaryLogic<189> (a, b, c);
++ case 190:
++ return VSXTernaryLogic<190> (a, b, c);
++ case 191:
++ return VSXTernaryLogic<191> (a, b, c);
++ case 192:
++ return VSXTernaryLogic<192> (a, b, c);
++ case 193:
++ return VSXTernaryLogic<193> (a, b, c);
++ case 194:
++ return VSXTernaryLogic<194> (a, b, c);
++ case 195:
++ return VSXTernaryLogic<195> (a, b, c);
++ case 196:
++ return VSXTernaryLogic<196> (a, b, c);
++ case 197:
++ return VSXTernaryLogic<197> (a, b, c);
++ case 198:
++ return VSXTernaryLogic<198> (a, b, c);
++ case 199:
++ return VSXTernaryLogic<199> (a, b, c);
++ case 200:
++ return VSXTernaryLogic<200> (a, b, c);
++ case 201:
++ return VSXTernaryLogic<201> (a, b, c);
++ case 202:
++ return VSXTernaryLogic<202> (a, b, c);
++ case 203:
++ return VSXTernaryLogic<203> (a, b, c);
++ case 204:
++ return VSXTernaryLogic<204> (a, b, c);
++ case 205:
++ return VSXTernaryLogic<205> (a, b, c);
++ case 206:
++ return VSXTernaryLogic<206> (a, b, c);
++ case 207:
++ return VSXTernaryLogic<207> (a, b, c);
++ case 208:
++ return VSXTernaryLogic<208> (a, b, c);
++ case 209:
++ return VSXTernaryLogic<209> (a, b, c);
++ case 210:
++ return VSXTernaryLogic<210> (a, b, c);
++ case 211:
++ return VSXTernaryLogic<211> (a, b, c);
++ case 212:
++ return VSXTernaryLogic<212> (a, b, c);
++ case 213:
++ return VSXTernaryLogic<213> (a, b, c);
++ case 214:
++ return VSXTernaryLogic<214> (a, b, c);
++ case 215:
++ return VSXTernaryLogic<215> (a, b, c);
++ case 216:
++ return VSXTernaryLogic<216> (a, b, c);
++ case 217:
++ return VSXTernaryLogic<217> (a, b, c);
++ case 218:
++ return VSXTernaryLogic<218> (a, b, c);
++ case 219:
++ return VSXTernaryLogic<219> (a, b, c);
++ case 220:
++ return VSXTernaryLogic<220> (a, b, c);
++ case 221:
++ return VSXTernaryLogic<221> (a, b, c);
++ case 222:
++ return VSXTernaryLogic<222> (a, b, c);
++ case 223:
++ return VSXTernaryLogic<223> (a, b, c);
++ case 224:
++ return VSXTernaryLogic<224> (a, b, c);
++ case 225:
++ return VSXTernaryLogic<225> (a, b, c);
++ case 226:
++ return VSXTernaryLogic<226> (a, b, c);
++ case 227:
++ return VSXTernaryLogic<227> (a, b, c);
++ case 228:
++ return VSXTernaryLogic<228> (a, b, c);
++ case 229:
++ return VSXTernaryLogic<229> (a, b, c);
++ case 230:
++ return VSXTernaryLogic<230> (a, b, c);
++ case 231:
++ return VSXTernaryLogic<231> (a, b, c);
++ case 232:
++ return VSXTernaryLogic<232> (a, b, c);
++ case 233:
++ return VSXTernaryLogic<233> (a, b, c);
++ case 234:
++ return VSXTernaryLogic<234> (a, b, c);
++ case 235:
++ return VSXTernaryLogic<235> (a, b, c);
++ case 236:
++ return VSXTernaryLogic<236> (a, b, c);
++ case 237:
++ return VSXTernaryLogic<237> (a, b, c);
++ case 238:
++ return VSXTernaryLogic<238> (a, b, c);
++ case 239:
++ return VSXTernaryLogic<239> (a, b, c);
++ case 240:
++ return VSXTernaryLogic<240> (a, b, c);
++ case 241:
++ return VSXTernaryLogic<241> (a, b, c);
++ case 242:
++ return VSXTernaryLogic<242> (a, b, c);
++ case 243:
++ return VSXTernaryLogic<243> (a, b, c);
++ case 244:
++ return VSXTernaryLogic<244> (a, b, c);
++ case 245:
++ return VSXTernaryLogic<245> (a, b, c);
++ case 246:
++ return VSXTernaryLogic<246> (a, b, c);
++ case 247:
++ return VSXTernaryLogic<247> (a, b, c);
++ case 248:
++ return VSXTernaryLogic<248> (a, b, c);
++ case 249:
++ return VSXTernaryLogic<249> (a, b, c);
++ case 250:
++ return VSXTernaryLogic<250> (a, b, c);
++ case 251:
++ return VSXTernaryLogic<251> (a, b, c);
++ case 252:
++ return VSXTernaryLogic<252> (a, b, c);
++ case 253:
++ return VSXTernaryLogic<253> (a, b, c);
++ case 254:
++ return VSXTernaryLogic<254> (a, b, c);
++ case 255:
++ return VSXTernaryLogic<255> (a, b, c);
++ default:
++ return a;
++ }
++}
++
++int
++main (int argc, char **argv)
++{
++ vector unsigned long long a = {0xD8, 0xDB};
++ vector unsigned long long b = {0x6C, 0x6C};
++ vector unsigned long long c = {0x56, 0x56};
++ vector unsigned long long ternlog_result = VSXTernaryLogic (a, b, c, 0xB6);
++
++ if (ternlog_result[0] != 0xffffffffffffff3dull
++ || ternlog_result[1] != 0xffffffffffffff3eull)
++ __builtin_abort ();
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/compile/asmgoto-6.c
+@@ -0,0 +1,26 @@
++
++/* { dg-do compile } */
++/* PR middle-end/110420 */
++/* PR middle-end/103979 */
++/* PR middle-end/98619 */
++/* Test that the middle-end does not remove the asm goto
++ with an output. */
++
++static int t;
++void g(void);
++
++void f(void)
++{
++ int __gu_val;
++ asm goto("#my asm "
++ : "=&r"(__gu_val)
++ :
++ :
++ : Efault);
++ t = __gu_val;
++ g();
++Efault:
++}
++
++/* Make sure "my asm " is still in the assembly. */
++/* { dg-final { scan-assembler "my asm " } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/compile/pr111699-1.c
+@@ -0,0 +1,7 @@
++typedef unsigned char __attribute__((__vector_size__ (8))) V;
++
++void
++foo (V *v)
++{
++ *v = (V) 0x107B9A7FF >= (*v <= 0);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/20230630-1.c
+@@ -0,0 +1,23 @@
++struct S {
++ short int i : 12;
++ char c1 : 1;
++ char c2 : 1;
++ char c3 : 1;
++ char c4 : 1;
++};
++
++int main (void)
++{
++ struct S s0 = { 341, 1, 1, 1, 1 };
++ char *p = (char *) &s0;
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ if (*p != 85)
++ __builtin_abort ();
++#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ if (*p != 21)
++ __builtin_abort ();
++#endif
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/20230630-2.c
+@@ -0,0 +1,29 @@
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++#define REVERSE_SSO __attribute__((scalar_storage_order("big-endian")));
++#else
++#define REVERSE_SSO __attribute__((scalar_storage_order("little-endian")));
++#endif
++
++struct S {
++ short int i : 12;
++ char c1 : 1;
++ char c2 : 1;
++ char c3 : 1;
++ char c4 : 1;
++} REVERSE_SSO;
++
++int main (void)
++{
++ struct S s0 = { 341, 1, 1, 1, 1 };
++ char *p = (char *) &s0;
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ if (*p != 21)
++ __builtin_abort ();
++#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ if (*p != 85)
++ __builtin_abort ();
++#endif
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/20230630-3.c
+@@ -0,0 +1,27 @@
++struct S {
++ int i : 24;
++ char c1 : 1;
++ char c2 : 1;
++ char c3 : 1;
++ char c4 : 1;
++ char c5 : 1;
++ char c6 : 1;
++ char c7 : 1;
++ char c8 : 1;
++};
++
++int main (void)
++{
++ struct S s0 = { 1193046, 1, 1, 1, 1, 1, 1, 1, 1 };
++ char *p = (char *) &s0;
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ if (*p != 86)
++ __builtin_abort ();
++#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ if (*p != 18)
++ __builtin_abort ();
++#endif
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/20230630-4.c
+@@ -0,0 +1,33 @@
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++#define REVERSE_SSO __attribute__((scalar_storage_order("big-endian")));
++#else
++#define REVERSE_SSO __attribute__((scalar_storage_order("little-endian")));
++#endif
++
++struct S {
++ int i : 24;
++ char c1 : 1;
++ char c2 : 1;
++ char c3 : 1;
++ char c4 : 1;
++ char c5 : 1;
++ char c6 : 1;
++ char c7 : 1;
++ char c8 : 1;
++} REVERSE_SSO;
++
++int main (void)
++{
++ struct S s0 = { 1193046, 1, 1, 1, 1, 1, 1, 1, 1 };
++ char *p = (char *) &s0;
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ if (*p != 18)
++ __builtin_abort ();
++#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ if (*p != 86)
++ __builtin_abort ();
++#endif
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/pr109778.c
+@@ -0,0 +1,26 @@
++/* PR tree-optimization/109778 */
++
++int a, b, c, d, *e = &c;
++
++static inline unsigned
++foo (unsigned char x)
++{
++ x = 1 | x << 1;
++ x = x >> 4 | x << 4;
++ return x;
++}
++
++static inline void
++bar (unsigned x)
++{
++ *e = 8 > foo (x + 86) - 86;
++}
++
++int
++main ()
++{
++ d = a && b;
++ bar (d + 4);
++ if (c != 1)
++ __builtin_abort ();
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.c-torture/execute/pr110914.c
+@@ -0,0 +1,22 @@
++/* PR tree-optimization/110914 */
++
++__attribute__ ((noipa)) int
++foo (const char *s, unsigned long l)
++{
++ unsigned char r = 0;
++ __builtin_memcpy (&r, s, l != 0);
++ return r;
++}
++
++int
++main ()
++{
++ const char *p = "123456";
++ int a = foo (p, __builtin_strlen (p) - 5);
++ int b = foo (p, __builtin_strlen (p) - 6);
++ if (a != '1')
++ __builtin_abort ();
++ if (b != 0)
++ __builtin_abort ();
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/lto/pr109778_0.c
+@@ -0,0 +1,22 @@
++/* PR tree-optimization/109778 */
++/* { dg-lto-do run } */
++/* { dg-lto-options { "-O2 -flto" } } */
++/* { dg-require-effective-target int32 } */
++
++int bar (int);
++
++__attribute__((noipa)) int
++foo (int x)
++{
++ x = bar (x);
++ x = (x << 16) | (int) ((unsigned) x >> 16);
++ return x & 0x10000000;
++}
++
++int
++main ()
++{
++ if (foo (0) || foo (-1))
++ __builtin_abort ();
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/lto/pr109778_1.c
+@@ -0,0 +1,7 @@
++int
++bar (int x)
++{
++ x &= 0x22222222;
++ x |= (int) 0xf1234567U;
++ return x;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/pr105660-1.c
+@@ -0,0 +1,13 @@
++/* PR105660
++ * { dg-do compile }
++ * { dg-options "-std=c17" }
++ */
++
++void gatherConservativeVars(int, int, int, int, int, int, int Hnvar, int,
++ int Hnyt, int Hnxyt, int, int Hstep, double[Hnyt],
++ double[Hnvar][Hstep][Hnxyt]);
++void gatherConservativeVars(int, int, int, int, int, int, int Hnvar, int, int Hnyt,
++ int Hnxyt, int, int Hstep, double[Hnyt],
++ double[Hnvar][Hstep][Hnxyt]);
++
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/pr105660-2.c
+@@ -0,0 +1,12 @@
++/* PR105660
++ * { dg-do compile }
++ * { dg-options "-Wall -std=c17" }
++ */
++
++
++struct bat_gen_conf_s;
++void batch_generator_create2(struct bat_gen_conf_s* config, int D, int N, const long bat_dims[D][N], const long tot_dims[D][N], const long tot_strs[D][N], const _Complex float* data[D]);
++void batch_generator_create2(struct bat_gen_conf_s* config, int D, int N, const long bat_dims[D][N], const long tot_dims[D][N], const long tot_strs[D][N], const _Complex float* data[D]);
++
++
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/pr111015.c
+@@ -0,0 +1,28 @@
++/* PR tree-optimization/111015 */
++/* { dg-do run { target int128 } } */
++/* { dg-options "-O2" } */
++
++struct S { unsigned a : 4, b : 4; unsigned __int128 c : 70; } d;
++
++__attribute__((noipa)) void
++foo (unsigned __int128 x, unsigned char y, unsigned char z)
++{
++ d.a = y;
++ d.b = z;
++ d.c = x;
++}
++
++int
++main ()
++{
++ foo (-1, 12, 5);
++ if (d.a != 12
++ || d.b != 5
++ || d.c != (-1ULL | (((unsigned __int128) 0x3f) << 64)))
++ __builtin_abort ();
++ foo (0x123456789abcdef0ULL | (((unsigned __int128) 26) << 64), 7, 11);
++ if (d.a != 7
++ || d.b != 11
++ || d.c != (0x123456789abcdef0ULL | (((unsigned __int128) 26) << 64)))
++ __builtin_abort ();
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c
+@@ -0,0 +1,57 @@
++/* { dg-do compile { target aarch64*-*-* } } */
++/* { dg-require-effective-target lp64 } */
++/* { dg-options "-O -fdisable-rtl-postreload -fpeephole2 -fno-schedule-fusion" } */
++
++extern int data[];
++
++void __RTL (startwith ("ira")) foo (void *ptr)
++{
++ (function "foo"
++ (param "ptr"
++ (DECL_RTL (reg/v:DI <0> [ ptr ]))
++ (DECL_RTL_INCOMING (reg/v:DI x0 [ ptr ]))
++ ) ;; param "ptr"
++ (insn-chain
++ (block 2
++ (edge-from entry (flags "FALLTHRU"))
++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++ (insn 4 (set (reg:DI <0>) (reg:DI x0)))
++ (insn 5 (set (reg:DI <1>)
++ (plus:DI (reg:DI <0>) (const_int 768))))
++ (insn 6 (set (mem:SI (plus:DI (reg:DI <0>)
++ (const_int 508)) [1 &data+508 S4 A4])
++ (const_int 0)))
++ (insn 7 (set (mem:SI (plus:DI (reg:DI <1>)
++ (const_int -256)) [1 &data+512 S4 A4])
++ (const_int 0)))
++ (edge-to exit (flags "FALLTHRU"))
++ ) ;; block 2
++ ) ;; insn-chain
++ ) ;; function
++}
++
++void __RTL (startwith ("ira")) bar (void *ptr)
++{
++ (function "bar"
++ (param "ptr"
++ (DECL_RTL (reg/v:DI <0> [ ptr ]))
++ (DECL_RTL_INCOMING (reg/v:DI x0 [ ptr ]))
++ ) ;; param "ptr"
++ (insn-chain
++ (block 2
++ (edge-from entry (flags "FALLTHRU"))
++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++ (insn 4 (set (reg:DI <0>) (reg:DI x0)))
++ (insn 5 (set (reg:DI <1>)
++ (plus:DI (reg:DI <0>) (const_int 768))))
++ (insn 6 (set (mem:SI (plus:DI (reg:DI <1>)
++ (const_int -256)) [1 &data+512 S4 A4])
++ (const_int 0)))
++ (insn 7 (set (mem:SI (plus:DI (reg:DI <0>)
++ (const_int 508)) [1 &data+508 S4 A4])
++ (const_int 0)))
++ (edge-to exit (flags "FALLTHRU"))
++ ) ;; block 2
++ ) ;; insn-chain
++ ) ;; function
++}
+--- a/src/gcc/testsuite/gcc.dg/tls/pr78796.c
++++ b/src/gcc/testsuite/gcc.dg/tls/pr78796.c
+@@ -1,7 +1,7 @@
+ /* PR target/78796 */
+ /* { dg-do run } */
+ /* { dg-options "-O2" } */
+-/* { dg-additional-options "-mcmodel=large" { target aarch64-*-* } } */
++/* { dg-additional-options "-mcmodel=large -fno-pie -no-pie" { target aarch64-*-* } } */
+ /* { dg-require-effective-target tls_runtime } */
+ /* { dg-add-options tls } */
+
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.dg/torture/pr110298.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++
++int a, b, c, d, e;
++int f() {
++ c = 0;
++ for (; c >= 0; c--) {
++ d = 0;
++ for (; d <= 0; d++) {
++ e = 0;
++ for (; d + c + e >= 0; e--)
++ ;
++ a = 1;
++ b = 0;
++ for (; a; ++b)
++ a *= 2;
++ for (; b + d >= 0;)
++ return 0;
++ }
++ }
++}
+--- a/src/gcc/testsuite/gcc.dg/vect/pr108950.c
++++ b/src/gcc/testsuite/gcc.dg/vect/pr108950.c
+@@ -1,5 +1,5 @@
+-/* { dg-require-effective-target vect_simd_clones } */
+ /* { dg-do compile } */
++/* { dg-require-effective-target vect_simd_clones } */
+
+ int m;
+ short int n;
+--- a/src/gcc/testsuite/gcc.dg/vect/pr97428.c
++++ b/src/gcc/testsuite/gcc.dg/vect/pr97428.c
+@@ -1,4 +1,5 @@
+ /* { dg-do compile } */
++/* { dg-require-effective-target vect_double } */
+
+ typedef struct { double re, im; } dcmlx_t;
+ typedef struct { double re[4], im[4]; } dcmlx4_t;
+--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp
++++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp
+@@ -27,7 +27,7 @@ if { ![istarget aarch64*-*-*] } then {
+
+ torture-init
+ set-torture-options $C_TORTURE_OPTIONS
+-set additional_flags "-W -Wall -Wno-abi"
++set additional_flags "-W -Wall -Wno-abi -fno-pie -no-pie"
+
+ # Test parameter passing. This uses abitest.S which relies on weak
+ # symbols.
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/acle/ls64_lto.c
+@@ -0,0 +1,10 @@
++/* { dg-do link { target aarch64_asm_ls64_ok } } */
++/* { dg-additional-options "-march=armv8.7-a -flto" } */
++#include <arm_acle.h>
++int main(void)
++{
++ data512_t d = __arm_ld64b ((const void *)0x1000);
++ __arm_st64b ((void *)0x2000, d);
++ uint64_t x = __arm_st64bv ((void *)0x3000, d);
++ x += __arm_st64bv0 ((void *)0x4000, d);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/acle/pr110100.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-march=armv8.7-a -O2" } */
++#include <arm_acle.h>
++void do_st64b(data512_t data) {
++ __arm_st64b((void*)0x10000000, data);
++}
++/* { dg-final { scan-assembler {mov\tx([123])?[0-9], 268435456} } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/acle/pr110132.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-march=armv8.7-a" } */
++
++/* Check that ls64 builtins can be invoked using a preprocessed testcase
++ without triggering bogus builtin warnings, see PR110132.
++
++ Note that this is purely to test GCC internals and user code should
++ include arm_acle.h to make use of these builtins. */
++
++#pragma GCC aarch64 "arm_acle.h"
++typedef __arm_data512_t data512_t;
++void f(void *p, data512_t d)
++{
++ __arm_st64b (p, d);
++}
+--- a/src/gcc/testsuite/gcc.target/aarch64/auto-init-7.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/auto-init-7.c
+@@ -1,6 +1,6 @@
+ /* Verify zero initialization for array, union, and structure type automatic variables. */
+ /* { dg-do compile } */
+-/* { dg-options "-ftrivial-auto-var-init=zero -fdump-rtl-expand" } */
++/* { dg-options "-ftrivial-auto-var-init=zero -fdump-rtl-expand -fno-stack-protector" } */
+
+ struct S
+ {
+--- a/src/gcc/testsuite/gcc.target/aarch64/fuse_adrp_add_1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/fuse_adrp_add_1.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target aarch64_small } */
+-/* { dg-options "-O3 -mcpu=cortex-a57" } */
++/* { dg-options "-O3 -mcpu=cortex-a57 -fno-pie" } */
+
+ enum reg_class { NO_REGS, AP_REG, XRF_REGS, GENERAL_REGS, AGRF_REGS,
+ XGRF_REGS, ALL_REGS, LIM_REG_CLASSES };
+--- a/src/gcc/testsuite/gcc.target/aarch64/mops_4.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/mops_4.c
+@@ -50,6 +50,54 @@ copy3 (int *x, int *y, long z, long *res)
+ *res = z;
+ }
+
++/*
++** move1:
++** mov (x[0-9]+), x0
++** cpyp \[\1\]!, \[x1\]!, x2!
++** cpym \[\1\]!, \[x1\]!, x2!
++** cpye \[\1\]!, \[x1\]!, x2!
++** str x0, \[x3\]
++** ret
++*/
++void
++move1 (int *x, int *y, long z, int **res)
++{
++ __builtin_memmove (x, y, z);
++ *res = x;
++}
++
++/*
++** move2:
++** mov (x[0-9]+), x1
++** cpyp \[x0\]!, \[\1\]!, x2!
++** cpym \[x0\]!, \[\1\]!, x2!
++** cpye \[x0\]!, \[\1\]!, x2!
++** str x1, \[x3\]
++** ret
++*/
++void
++move2 (int *x, int *y, long z, int **res)
++{
++ __builtin_memmove (x, y, z);
++ *res = y;
++}
++
++/*
++** move3:
++** mov (x[0-9]+), x2
++** cpyp \[x0\]!, \[x1\]!, \1!
++** cpym \[x0\]!, \[x1\]!, \1!
++** cpye \[x0\]!, \[x1\]!, \1!
++** str x2, \[x3\]
++** ret
++*/
++void
++move3 (int *x, int *y, long z, long *res)
++{
++ __builtin_memmove (x, y, z);
++ *res = z;
++}
++
+ /*
+ ** set1:
+ ** mov (x[0-9]+), x0
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr103147-10.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr103147-10.c
+@@ -1,4 +1,4 @@
+-/* { dg-options "-O2 -fpack-struct -mstrict-align" } */
++/* { dg-options "-O2 -fpack-struct -mstrict-align -fno-stack-protector" } */
+ /* { dg-final { check-function-bodies "**" "" "" } } */
+
+ #include <arm_neon.h>
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr104005.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr104005.c
+@@ -1,4 +1,4 @@
+-/* { dg-options "-O2 -funroll-loops" } */
++/* { dg-options "-O2 -funroll-loops -fno-stack-protector" } */
+
+ typedef int v2 __attribute__((vector_size(8)));
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr63304_1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr63304_1.c
+@@ -1,6 +1,6 @@
+ /* { dg-do assemble } */
+ /* { dg-require-effective-target lp64 } */
+-/* { dg-options "-O1 --save-temps" } */
++/* { dg-options "-O1 --save-temps -fno-pie" } */
+ #pragma GCC push_options
+ #pragma GCC target ("+nothing+simd,cmodel=small")
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr70120-2.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr70120-2.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target lp64 } */
+-/* { dg-options "-Og -freorder-functions -g3 -mcmodel=large" } */
++/* { dg-options "-Og -freorder-functions -g3 -mcmodel=large -fno-pie" } */
+
+ typedef short v32u16 __attribute__ ((vector_size (32)));
+ typedef int v32u32 __attribute__ ((vector_size (32)));
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr78733.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr78733.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads" } */
++/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads -fno-pie" } */
+ /* { dg-require-effective-target lp64 } */
+ /* { dg-skip-if "-mcmodel=large, no support for -fpic" { aarch64-*-* } { "-fpic" } { "" } } */
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr79041-2.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr79041-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads" } */
++/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads -fno-pie" } */
+ /* { dg-require-effective-target lp64 } */
+ /* { dg-skip-if "-mcmodel=large, no support for -fpic" { aarch64-*-* } { "-fpic" } { "" } } */
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr94530.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr94530.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target lp64 } */
+-/* { dg-options "-Os -mcpu=falkor -mpc-relative-literal-loads -mcmodel=large" } */
++/* { dg-options "-Os -mcpu=falkor -mpc-relative-literal-loads -mcmodel=large -fno-pie" } */
+
+ extern void bar(const char *);
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/pr94577.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/pr94577.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mcmodel=large -mabi=ilp32" } */
++/* { dg-options "-mcmodel=large -mabi=ilp32 -fno-pie" } */
+
+ void
+ foo ()
+--- a/src/gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target lp64 } */
+-/* { dg-options "-O2 -mcmodel=large -fno-builtin" } */
++/* { dg-options "-O2 -mcmodel=large -fno-builtin -fno-pie" } */
+ /* { dg-skip-if "-mcmodel=large -fPIC not currently supported" { aarch64-*-* } { "-fPIC" } { "" } } */
+
+ typedef long unsigned int size_t;
+--- a/src/gcc/testsuite/gcc.target/aarch64/shrink_wrap_1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/shrink_wrap_1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { aarch64*-*-* } } } */
+-/* { dg-options "-O2" } */
++/* { dg-options "-O2 -fno-stack-protector" } */
+ /* { dg-final { check-function-bodies "**" "" } } */
+
+ /*
+--- a/src/gcc/testsuite/gcc.target/aarch64/stack-check-cfa-1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-cfa-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables -fno-stack-protector" } */
+ /* { dg-require-effective-target supports_stack_clash_protection } */
+
+ #define SIZE 128*1024
+--- a/src/gcc/testsuite/gcc.target/aarch64/stack-check-cfa-2.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-cfa-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables -fno-stack-protector" } */
+ /* { dg-require-effective-target supports_stack_clash_protection } */
+
+ #define SIZE 1280*1024 + 512
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -0,0 +1,55 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp, #?1024\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -0,0 +1,100 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #4064
++** str xzr, \[sp, #?1024\]
++** cbnz w0, .*
++** bl g
++** ...
++** str x26, \[sp, #?4128\]
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp, #?1024\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test3:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test3(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-19.c
+@@ -0,0 +1,100 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12 -fsanitize=shadow-call-stack -ffixed-x18" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #4064
++** str xzr, \[sp, #?1024\]
++** cbnz w0, .*
++** bl g
++** ...
++** str x26, \[sp, #?4128\]
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp, #?1024\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test3:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test3(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-20.c
+@@ -0,0 +1,3 @@
++/* { dg-options "-O2 -fstack-protector-all -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12 -fsanitize=shadow-call-stack -ffixed-x18" } */
++
++#include "stack-check-prologue-19.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+@@ -0,0 +1,95 @@
++/* { dg-options " -O -fstack-protector-strong -mstack-protector-guard=sysreg -mstack-protector-guard-reg=tpidr2_el0 -mstack-protector-guard-offset=16" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void g(void *);
++__SVBool_t *h(void *);
++
++/*
++** test1:
++** sub sp, sp, #288
++** stp x29, x30, \[sp, #?272\]
++** add x29, sp, #?272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** ldp x29, x30, \[sp, #?272\]
++** add sp, sp, #?288
++** ret
++** bl __stack_chk_fail
++*/
++int test1() {
++ int y[0x40];
++ g(y);
++ return 1;
++}
++
++/*
++** test2:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** sub sp, sp, #1040
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?1032\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?1040
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++int test2() {
++ int y[0x100];
++ g(y);
++ return 1;
++}
++
++#pragma GCC target "+sve"
++
++/*
++** test3:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** addvl sp, sp, #-18
++** ...
++** str p4, \[sp\]
++** ...
++** sub sp, sp, #272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, #?0
++** add x0, sp, #?8
++** bl h
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?272
++** ...
++** ldr p4, \[sp\]
++** ...
++** addvl sp, sp, #18
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++__SVBool_t test3() {
++ int y[0x40];
++ return *h(y);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+@@ -0,0 +1,33 @@
++/* { dg-options "-O2 -mcpu=neoverse-v1 -fstack-protector-all" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++** main:
++** ...
++** stp x29, x30, \[sp, #?-[0-9]+\]!
++** ...
++** sub sp, sp, #[0-9]+
++** ...
++** str x[0-9]+, \[x29, #?-8\]
++** ...
++*/
++int f(const char *);
++void g(void *);
++int main(int argc, char* argv[])
++{
++ int a;
++ int b;
++ char c[2+f(argv[1])];
++ int d[0x100];
++ char y;
++
++ y=42; a=4; b=10;
++ c[0] = 'h'; c[1] = '\0';
++
++ c[f(argv[2])] = '\0';
++
++ __builtin_printf("%d %d\n%s\n", a, b, c);
++ g(d);
++
++ return 0;
++}
+--- a/src/gcc/testsuite/gcc.target/aarch64/sve/pcs/aarch64-sve-pcs.exp
++++ b/src/gcc/testsuite/gcc.target/aarch64/sve/pcs/aarch64-sve-pcs.exp
+@@ -37,11 +37,12 @@ if ![info exists DEFAULT_CFLAGS] then {
+ # Initialize `dg'.
+ dg-init
+
+-# Force SVE if we're not testing it already.
++# Force SVE if we're not testing it already.  Also disable the stack
++# protector to avoid test failures with --enable-default-ssp.
+ if { [check_effective_target_aarch64_sve] } {
+- set sve_flags ""
++ set sve_flags "-fno-stack-protector"
+ } else {
+- set sve_flags "-march=armv8.2-a+sve"
++ set sve_flags "-march=armv8.2-a+sve -fno-stack-protector"
+ }
+
+ # Main loop.
+--- a/src/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_3.c
+@@ -11,11 +11,10 @@
+ ** mov x11, sp
+ ** ...
+ ** sub sp, sp, x13
+-** str p4, \[sp\]
+ ** cbz w0, [^\n]*
++** str p4, \[sp\]
+ ** ...
+ ** ptrue p0\.b, all
+-** ldr p4, \[sp\]
+ ** addvl sp, sp, #1
+ ** ldr x24, \[sp\], 32
+ ** ret
+@@ -39,13 +38,12 @@ test_1 (int n)
+ ** mov x11, sp
+ ** ...
+ ** sub sp, sp, x13
+-** str p4, \[sp\]
+ ** cbz w0, [^\n]*
++** str p4, \[sp\]
+ ** str p5, \[sp, #1, mul vl\]
+ ** str p6, \[sp, #2, mul vl\]
+ ** ...
+ ** ptrue p0\.b, all
+-** ldr p4, \[sp\]
+ ** addvl sp, sp, #1
+ ** ldr x24, \[sp\], 32
+ ** ret
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c
+@@ -0,0 +1,12 @@
++/* PR tree-optimization/109505 */
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=armv8.2-a+sve" } */
++
++#pragma GCC aarch64 "arm_sve.h"
++
++unsigned long
++foo (unsigned long x)
++{
++ unsigned long y = svcntb ();
++ return (x | 15) & y;
++}
+--- a/src/gcc/testsuite/gcc.target/aarch64/test_frame_17.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/test_frame_17.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2" } */
++/* { dg-options "-O2 -fno-stack-protector" } */
+
+ /* Test reuse of stack adjustment temporaries. */
+
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/alpha/pr106966.c
+@@ -0,0 +1,13 @@
++/* PR target/106966 */
++/* { dg-do compile } */
++/* { dg-options "-O2 -mbuild-constants" } */
++
++void
++do_console (unsigned short *vga)
++{
++ vga[0] = 'H';
++ vga[1] = 'e';
++ vga[2] = 'l';
++ vga[3] = 'l';
++ vga[4] = 'o';
++}
+--- a/src/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
++++ b/src/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
+@@ -567,80 +567,80 @@
+ contain back references). */
+ /*
+ ** test_cde_vcx1q_mfloat16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_mfloat32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_muint8x16_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_muint16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_muint32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_muint64x2_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_mint8x16_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_mint16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_mint32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1q_mint64x2_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1t p0, q0, #32
+ ** bx lr
+@@ -649,80 +649,80 @@
+
+ /*
+ ** test_cde_vcx1qa_mfloat16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_mfloat32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_muint8x16_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_muint16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_muint32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_muint64x2_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_mint8x16_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_mint16x8_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_mint32x4_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+ */
+ /*
+ ** test_cde_vcx1qa_mint64x2_tintint:
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
+-** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr P0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
++** (?:vldr\.64 d0, \.L[0-9]*\n\tvldr\.64 d1, \.L[0-9]*\+8|vmsr p0, r2 @ movhi)
+ ** vpst
+ ** vcx1at p0, q0, #32
+ ** bx lr
+@@ -731,8 +731,8 @@
+
+ /*
+ ** test_cde_vcx2q_mfloat16x8_tuint16x8_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -740,8 +740,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_mfloat16x8_tfloat32x4_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -749,8 +749,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_mfloat32x4_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -758,8 +758,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_mint64x2_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -767,8 +767,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_mint8x16_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -776,8 +776,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_muint16x8_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -785,8 +785,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_muint8x16_tint64x2_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -794,8 +794,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_muint8x16_tint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -803,8 +803,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_muint8x16_tuint16x8_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -812,8 +812,8 @@
+ */
+ /*
+ ** test_cde_vcx2q_muint8x16_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2t p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -823,8 +823,8 @@
+
+ /*
+ ** test_cde_vcx2qa_mfloat16x8_tuint16x8_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -832,8 +832,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_mfloat16x8_tfloat32x4_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -841,8 +841,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_mfloat32x4_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -850,8 +850,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_mint64x2_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -859,8 +859,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_mint8x16_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -868,8 +868,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_muint16x8_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -877,8 +877,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_muint8x16_tint64x2_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -886,8 +886,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_muint8x16_tint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -895,8 +895,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_muint8x16_tuint16x8_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -904,8 +904,8 @@
+ */
+ /*
+ ** test_cde_vcx2qa_muint8x16_tuint8x16_tint:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r1 @ movhi)
+ ** vpst
+ ** vcx2at p0, (q[0-7]), q0, #32
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -915,8 +915,8 @@
+
+ /*
+ ** test_cde_vcx3q_muint8x16_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -924,8 +924,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_mfloat16x8_tfloat16x8_tfloat16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -933,8 +933,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_mfloat32x4_tuint64x2_tfloat16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -942,8 +942,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint16x8_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -951,8 +951,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tuint16x8_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -960,8 +960,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tuint8x16_tuint16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -969,8 +969,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_mint8x16_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -978,8 +978,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -987,8 +987,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tuint8x16_tint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -996,8 +996,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_mint64x2_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1005,8 +1005,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tint64x2_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1014,8 +1014,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tuint8x16_tint64x2_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1023,8 +1023,8 @@
+ */
+ /*
+ ** test_cde_vcx3q_muint8x16_tint64x2_tint64x2_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3t p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1034,8 +1034,8 @@
+
+ /*
+ ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1043,8 +1043,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_mfloat16x8_tfloat16x8_tfloat16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1052,8 +1052,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_mfloat32x4_tuint64x2_tfloat16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1061,8 +1061,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint16x8_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1070,8 +1070,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tuint16x8_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1079,8 +1079,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint16x8_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1088,8 +1088,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_mint8x16_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1097,8 +1097,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1106,8 +1106,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tuint8x16_tint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1115,8 +1115,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_mint64x2_tuint8x16_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1124,8 +1124,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tint64x2_tuint8x16_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1133,8 +1133,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tuint8x16_tint64x2_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+@@ -1142,8 +1142,8 @@
+ */
+ /*
+ ** test_cde_vcx3qa_muint8x16_tint64x2_tint64x2_t:
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
+-** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr P0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
++** (?:vldr\.64 d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64 d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr p0, r0 @ movhi)
+ ** vpst
+ ** vcx3at p0, (q[0-7]), q0, q1, #15
+ ** vmov q0, \1([[:space:]]+@ [^\n]*)?
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/general/preserve_user_namespace_1.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#define __ARM_MVE_PRESERVE_USER_NAMESPACE
++#include <arm_mve.h>
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/asrl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/asrl.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** asrl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64_t
+-asrl_reg (int64_t longval3, int32_t x)
++foo (int64_t value, int32_t shift)
+ {
+- return asrl (longval3, x);
++ return asrl (value, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "asrl\\tr\[0-9\]+, r\[0-9\]+, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/lsll.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/lsll.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** lsll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint64_t
+-lsll_reg (uint64_t longval3, int32_t x)
++foo (uint64_t value, int32_t shift)
+ {
+- return lsll (longval3, x);
++ return lsll (value, shift);
++}
++
++/*
++**foo1:
++** ...
++** lsll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint64_t
++foo1 (int32_t shift)
++{
++ return lsll (1, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "lsll\\tr\[0-9\]+, r\[0-9\]+, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_fp_vaddq_n.c
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include <arm_mve.h>
+-int8x16_t foo (int8x16_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-int16x8_t foo1 (int16x8_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-int32x4_t foo2 (int32x4_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint8x16_t foo3 (uint8x16_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint16x8_t foo4 (uint16x8_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint32x4_t foo5 (uint32x4_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-float16x8_t foo6 (float16x8_t a)
+-{
+- return vaddq (a, (float16_t)23.6);
+-}
+-float32x4_t foo7 (float32x4_t a)
+-{
+- return vaddq (a, (float32_t)23.46);
+-}
+-float16x8_t foo8 (float16x8_t a)
+-{
+- return vaddq (a, 23.6);
+-}
+-float32x4_t foo9 (float32x4_t a)
+-{
+- return vaddq (a, 23.46);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-fp.c
+@@ -0,0 +1,61 @@
++/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-additional-options "-O2 -Wno-pedantic -Wno-long-long -Wno-incompatible-pointer-types" { target c } } */
++/* { dg-additional-options "-O2 -Wno-pedantic -Wno-long-long -fpermissive" { target c++ } } */
++#include "arm_mve.h"
++
++float f1;
++double f2;
++float16_t f3;
++float32_t f4;
++__fp16 f5;
++#ifndef __cplusplus
++_Float16 f6;
++#endif
++
++float16x8_t floatvec;
++
++/* Test a few different supported ways of passing a scalar float value.
++The intrinsic vmulq was chosen arbitrarily, but it is representative of
++all intrinsics that take a non-const scalar value. */
++void
++test_scalars (void)
++{
++ /* Test a few different supported ways of passing a float value. */
++ floatvec = vmulq(floatvec, 0.5);
++ floatvec = vmulq(floatvec, 0.5f);
++ floatvec = vmulq(floatvec, (__fp16) 0.5);
++ floatvec = vmulq(floatvec, f1);
++ floatvec = vmulq(floatvec, f2);
++ floatvec = vmulq(floatvec, f3);
++ floatvec = vmulq(floatvec, f4);
++ floatvec = vmulq(floatvec, f5);
++#ifndef __cplusplus
++ floatvec = vmulq(floatvec, f6);
++ floatvec = vmulq(floatvec, 0.15f16);
++ floatvec = vmulq(floatvec, (_Float16) 0.15);
++#endif
++}
++
++/* Next, test a number of valid pointer overloads. */
++void
++foo11 (__fp16 * addr, float16x8_t value)
++{
++ vst1q (addr, value);
++}
++
++#ifndef __cplusplus
++void
++foo12 (_Float16 * addr, float16x8_t value)
++{
++ vst1q (addr, value);
++}
++#endif
++
++void
++foo13 (float * addr, float32x4_t value)
++{
++ vst1q (addr, value);
++}
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_intrinsic_type_overloads-int.c
+@@ -0,0 +1,100 @@
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-additional-options "-O2 -Wno-pedantic -Wno-long-long -Wno-incompatible-pointer-types" { target c } } */
++/* { dg-additional-options "-O2 -Wno-pedantic -Wno-long-long -fpermissive" { target c++ } } */
++#include "arm_mve.h"
++
++int i1;
++short i2;
++long i3;
++long long i4;
++int8_t i5;
++int16_t i6;
++int32_t i7;
++int64_t i8;
++
++int16x8_t intvec;
++
++/* Test a few different supported ways of passing a scalar int value.
++The intrinsic vmulq was chosen arbitrarily, but it is representative of
++all intrinsics that take a non-const scalar value. */
++void
++test_scalars (void)
++{
++ intvec = vmulq(intvec, 2);
++ intvec = vmulq(intvec, (int32_t) 2);
++ intvec = vmulq(intvec, (short) 2);
++ intvec = vmulq(intvec, i1);
++ intvec = vmulq(intvec, i2);
++ intvec = vmulq(intvec, i3);
++ intvec = vmulq(intvec, i4);
++ intvec = vmulq(intvec, i5);
++ intvec = vmulq(intvec, i6);
++ intvec = vmulq(intvec, i7);
++ intvec = vmulq(intvec, i8);
++}
++
++/* Next, test a number of valid pointer overloads. */
++void
++foo1 (signed char * addr, int8x16_t value)
++{
++ vst1q (addr, value);
++}
++
++void
++foo2 (short * addr, int16x8_t value)
++{
++ vst1q (addr, value);
++}
++
++/* Glibc defines int32_t as 'int' while newlib defines it as 'long int'.
++
++ Although these correspond to the same size, g++ complains when using the
++ 'wrong' version:
++ invalid conversion from 'long int*' to 'int32_t*' {aka 'int*'} [-fpermissive]
++
++ The trick below is to make this test pass whether using glibc-based or
++ newlib-based toolchains. */
++
++#if defined(__GLIBC__)
++#define word_type int
++#else
++#define word_type long int
++#endif
++void
++foo3 (word_type * addr, int32x4_t value)
++{
++ vst1q (addr, value);
++}
++
++void
++foo5 (long long * addr, uint64x2_t value)
++{
++ vldrdq_gather_offset (addr, value);
++}
++
++void
++foo6 (unsigned char * addr, uint8x16_t value)
++{
++ vst1q (addr, value);
++}
++
++void
++foo7 (unsigned short * addr, uint16x8_t value)
++{
++ vst1q (addr, value);
++}
++
++void
++foo8 (unsigned word_type * addr, uint32x4_t value)
++{
++ vst1q (addr, value);
++}
++
++void
++foo10 (unsigned long long * addr, uint64x2_t value)
++{
++ vldrdq_gather_offset (addr, value);
++}
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vaddq_m.c
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include <arm_mve.h>
+-mve_pred16_t p;
+-
+-int32x4_t fn1 (int32x4_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_s32(), vecIdx, 1, p);
+-}
+-
+-int16x8_t fn2 (int16x8_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_s16(), vecIdx, 1, p);
+-}
+-
+-int8x16_t fn3 (int8x16_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_s8(), vecIdx, 1, p);
+-}
+-
+-uint32x4_t fn4 (uint32x4_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_u32(), vecIdx, 1, p);
+-}
+-
+-uint16x8_t fn5 (uint16x8_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_u16(), vecIdx, 1, p);
+-}
+-
+-uint8x16_t fn6 (uint8x16_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_u8(), vecIdx, 1, p);
+-}
+-
+-float32x4_t fn7 (float32x4_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_f32(), vecIdx, (float32_t) 1.23, p);
+-}
+-
+-float16x8_t fn8 (float16x8_t vecIdx)
+-{
+- return vaddq_m(vuninitializedq_f16(), vecIdx, (float16_t) 1.40, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vaddq_n.c
++++ /dev/null
+@@ -1,31 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include <arm_mve.h>
+-int8x16_t foo (int8x16_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-int16x8_t foo1 (int16x8_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-int32x4_t foo2 (int32x4_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint8x16_t foo3 (uint8x16_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint16x8_t foo4 (uint16x8_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-uint32x4_t foo5 (uint32x4_t a, int16_t b)
+-{
+- return vaddq (a, (b<<3));
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (uint16x8_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vddupq_m (inactive, a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint32x4_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vddupq_m (inactive, a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_m_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (uint8x16_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vddupq_m (inactive, a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (int32_t a)
+-{
+- return vddupq_u16 (a, 4);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_n_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (int32_t a)
+-{
+- return vddupq_u32 (a, 1);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (int32_t a)
+-{
+- return vddupq_u8 (a, 1);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint16x8_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vddupq_x_u16 (a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint32x4_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vddupq_x_u32 (a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vddupq_x_n_u8.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint8x16_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vddupq_x_u8 (a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return vdwdupq_x_u16 (a, b, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return vdwdupq_x_u32 (a, b, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vdwdupq_x_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return vdwdupq_x_u8 (a, b, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (uint16x8_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vidupq_m (inactive, a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint32x4_t
+-foo1 (uint32x4_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vidupq_m (inactive, a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_m_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (uint8x16_t inactive, int32_t a, mve_pred16_t p)
+-{
+- return vidupq_m (inactive, a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (int32_t a)
+-{
+- return vidupq_u16 (a, 4);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_n_u32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint32x4_t
+-foo1 (int32_t a)
+-{
+- return vidupq_u32 (a, 1);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (int32_t a)
+-{
+- return vidupq_u8 (a, 1);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint16x8_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vidupq_x_u16 (a, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint32x4_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vidupq_x_u32 (a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vidupq_x_n_u8.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint8x16_t
+-foo1 (int32_t a, mve_pred16_t p)
+-{
+- return vidupq_x_u8 (a, 1, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return viwdupq_x_u16 (a, b, 2, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return viwdupq_x_u32 (a, b, 4, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_viwdupq_x_n_u8.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint8x16_t
+-foo1 (int32_t a, uint32_t b, mve_pred16_t p)
+-{
+- return viwdupq_x_u8 (a, b, 8, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_s64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int64x2_t
+-foo1 (int64_t * base, uint64x2_t offset)
+-{
+- return vldrdq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_u64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint64x2_t
+-foo1 (uint64_t * base, uint64x2_t offset)
+-{
+- return vldrdq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_z_s64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int64x2_t
+-foo1 (int64_t * base, uint64x2_t offset, mve_pred16_t p)
+-{
+- return vldrdq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_offset_z_u64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint64x2_t
+-foo1 (uint64_t * base, uint64x2_t offset, mve_pred16_t p)
+-{
+- return vldrdq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_s64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int64x2_t
+-foo1 (int64_t * base, uint64x2_t offset)
+-{
+- return vldrdq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_u64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint64x2_t
+-foo1 (uint64_t * base, uint64x2_t offset)
+-{
+- return vldrdq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_z_s64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int64x2_t
+-foo1 (int64_t * base, uint64x2_t offset, mve_pred16_t p)
+-{
+- return vldrdq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrdq_gather_shifted_offset_z_u64.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint64x2_t
+-foo1 (uint64_t * base, uint64x2_t offset, mve_pred16_t p)
+-{
+- return vldrdq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_f16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_s16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int16x8_t
+-foo1 (int16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_s32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int32x4_t
+-foo1 (int16_t * base, uint32x4_t offset)
+-{
+- return vldrhq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_u16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint16x8_t
+-foo1 (uint16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint16_t * base, uint32x4_t offset)
+-{
+- return vldrhq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_f16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_s16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int16x8_t
+-foo1 (int16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_s32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int32x4_t
+-foo1 (int16_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (uint16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_offset_z_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint16_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_f16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_s16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int16x8_t
+-foo1 (int16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_s32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int32x4_t
+-foo1 (int16_t * base, uint32x4_t offset)
+-{
+- return vldrhq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_u16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint16x8_t
+-foo1 (uint16_t * base, uint16x8_t offset)
+-{
+- return vldrhq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint16_t * base, uint32x4_t offset)
+-{
+- return vldrhq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_f16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler "vldrht.f16" } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_s16.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int16x8_t
+-foo1 (int16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_s32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-int32x4_t
+-foo1 (int16_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_u16.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint16x8_t
+-foo1 (uint16_t * base, uint16x8_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrhq_gather_shifted_offset_z_u32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-uint32x4_t
+-foo1 (uint16_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrhq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_f32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_s32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int32x4_t
+-foo1 (int32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_f32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_s32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int32x4_t
+-foo1 (int32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_offset_z_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_f32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_s32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int32x4_t
+-foo1 (int32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint32_t * base, uint32x4_t offset)
+-{
+- return vldrwq_gather_shifted_offset (base, offset);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_f32.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_s32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int32x4_t
+-foo1 (int32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vldrwq_gather_shifted_offset_z_u32.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+-/* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-uint32x4_t
+-foo1 (uint32_t * base, uint32x4_t offset, mve_pred16_t p)
+-{
+- return vldrwq_gather_shifted_offset_z (base, offset, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vstore_scatter_shifted_offset.c
++++ /dev/null
+@@ -1,141 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-int
+-foowu32( uint32_t * pDataSrc, uint32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- uint32x4_t vecIn1 = vldrwq_u32 ((uint32_t const *) pDataSrc);
+- uint32x4_t vecIn2 = vldrwq_u32 ((uint32_t const *) &pDataSrc[4]);
+- vstrwq_scatter_shifted_offset_u32 (pDataDest, vecOffs1, vecIn1);
+- vstrwq_scatter_shifted_offset_u32 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foowf32( float32_t * pDataSrc, float32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- float32x4_t vecIn1 = vldrwq_f32 ((float32_t const *) pDataSrc);
+- float32x4_t vecIn2 = vldrwq_f32 ((float32_t const *) &pDataSrc[4]);
+- vstrwq_scatter_shifted_offset_f32 (pDataDest, vecOffs1, vecIn1);
+- vstrwq_scatter_shifted_offset_f32 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohu16( uint16_t * pDataSrc, uint16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- uint16x8_t vecIn1 = vldrhq_u16 ((uint16_t const *) pDataSrc);
+- uint16x8_t vecIn2 = vldrhq_u16 ((uint16_t const *) &pDataSrc[8]);
+- vstrhq_scatter_shifted_offset_u16 (pDataDest, vecOffs1, vecIn1);
+- vstrhq_scatter_shifted_offset_u16 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foohu32( uint32_t * pDataSrc, uint32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- uint32x4_t vecIn1 = vldrhq_u32 ((uint16_t const *) pDataSrc);
+- uint32x4_t vecIn2 = vldrhq_u32 ((uint16_t const *) &pDataSrc[4]);
+- vstrhq_scatter_shifted_offset_u32 ((uint16_t *)pDataDest, vecOffs1, vecIn1);
+- vstrhq_scatter_shifted_offset_u32 ((uint16_t *)pDataDest, vecOffs2, vecIn2);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohf16( float16_t * pDataSrc, float16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- float16x8_t vecIn1 = vldrhq_f16 ((float16_t const *) pDataSrc);
+- float16x8_t vecIn2 = vldrhq_f16 ((float16_t const *) &pDataSrc[8]);
+- vstrhq_scatter_shifted_offset_f16 (pDataDest, vecOffs1, vecIn1);
+- vstrhq_scatter_shifted_offset_f16 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foodu64( uint64_t * pDataSrc, uint64_t * pDataDest)
+-{
+- const uint64x2_t vecOffs1 = { 0, 1};
+- const uint64x2_t vecOffs2 = { 2, 3};
+- uint32x4_t vecIn1 = vldrwq_u32 ((uint32_t const *) pDataSrc);
+- uint32x4_t vecIn2 = vldrwq_u32 ((uint32_t const *) &pDataSrc[2]);
+-
+- vstrdq_scatter_shifted_offset_u64 (pDataDest, vecOffs1, (uint64x2_t) vecIn1);
+- vstrdq_scatter_shifted_offset_u64 (pDataDest, vecOffs2, (uint64x2_t) vecIn2);
+-
+- pDataDest[2] = pDataSrc[2];
+- return 0;
+-}
+-
+-int
+-foows32( int32_t * pDataSrc, int32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- int32x4_t vecIn1 = vldrwq_s32 ((int32_t const *) pDataSrc);
+- int32x4_t vecIn2 = vldrwq_s32 ((int32_t const *) &pDataSrc[4]);
+- vstrwq_scatter_shifted_offset_s32 (pDataDest, vecOffs1, vecIn1);
+- vstrwq_scatter_shifted_offset_s32 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohs16( int16_t * pDataSrc, int16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- int16x8_t vecIn1 = vldrhq_s16 ((int16_t const *) pDataSrc);
+- int16x8_t vecIn2 = vldrhq_s16 ((int16_t const *) &pDataSrc[8]);
+- vstrhq_scatter_shifted_offset_s16 (pDataDest, vecOffs1, vecIn1);
+- vstrhq_scatter_shifted_offset_s16 (pDataDest, vecOffs2, vecIn2);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foohs32( int32_t * pDataSrc, int32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- int32x4_t vecIn1 = vldrhq_s32 ((int16_t const *) pDataSrc);
+- int32x4_t vecIn2 = vldrhq_s32 ((int16_t const *) &pDataSrc[4]);
+- vstrhq_scatter_shifted_offset_s32 ((int16_t *)pDataDest, vecOffs1, vecIn1);
+- vstrhq_scatter_shifted_offset_s32 ((int16_t *)pDataDest, vecOffs2, vecIn2);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foods64( int64_t * pDataSrc, int64_t * pDataDest)
+-{
+- const uint64x2_t vecOffs1 = { 0, 1};
+- const uint64x2_t vecOffs2 = { 2, 3};
+- int32x4_t vecIn1 = vldrwq_s32 ((int32_t const *) pDataSrc);
+- int32x4_t vecIn2 = vldrwq_s32 ((int32_t const *) &pDataSrc[2]);
+-
+- vstrdq_scatter_shifted_offset_s64 (pDataDest, vecOffs1, (int64x2_t) vecIn1);
+- vstrdq_scatter_shifted_offset_s64 (pDataDest, vecOffs2, (int64x2_t) vecIn2);
+-
+- pDataDest[2] = pDataSrc[2];
+- return 0;
+-}
+-
+-/* { dg-final { scan-assembler-times "vstr\[a-z\]" 20 } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/mve_vstore_scatter_shifted_offset_p.c
++++ /dev/null
+@@ -1,142 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-
+-mve_pred16_t __p;
+-int
+-foowu32( uint32_t * pDataSrc, uint32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- uint32x4_t vecIn1 = vldrwq_z_u32 ((uint32_t const *) pDataSrc, __p);
+- uint32x4_t vecIn2 = vldrwq_z_u32 ((uint32_t const *) &pDataSrc[4], __p);
+- vstrwq_scatter_shifted_offset_p_u32 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrwq_scatter_shifted_offset_p_u32 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foowf32( float32_t * pDataSrc, float32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- float32x4_t vecIn1 = vldrwq_z_f32 ((float32_t const *) pDataSrc, __p);
+- float32x4_t vecIn2 = vldrwq_z_f32 ((float32_t const *) &pDataSrc[4], __p);
+- vstrwq_scatter_shifted_offset_p_f32 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrwq_scatter_shifted_offset_p_f32 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohu16( uint16_t * pDataSrc, uint16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- uint16x8_t vecIn1 = vldrhq_z_u16 ((uint16_t const *) pDataSrc, __p);
+- uint16x8_t vecIn2 = vldrhq_z_u16 ((uint16_t const *) &pDataSrc[8], __p);
+- vstrhq_scatter_shifted_offset_p_u16 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrhq_scatter_shifted_offset_p_u16 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foohu32( uint32_t * pDataSrc, uint32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- uint32x4_t vecIn1 = vldrhq_z_u32 ((uint16_t const *) pDataSrc, __p);
+- uint32x4_t vecIn2 = vldrhq_z_u32 ((uint16_t const *) &pDataSrc[4], __p);
+- vstrhq_scatter_shifted_offset_p_u32 ((uint16_t *)pDataDest, vecOffs1, vecIn1, __p);
+- vstrhq_scatter_shifted_offset_p_u32 ((uint16_t *)pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohf16( float16_t * pDataSrc, float16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- float16x8_t vecIn1 = vldrhq_z_f16 ((float16_t const *) pDataSrc, __p);
+- float16x8_t vecIn2 = vldrhq_z_f16 ((float16_t const *) &pDataSrc[8], __p);
+- vstrhq_scatter_shifted_offset_p_f16 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrhq_scatter_shifted_offset_p_f16 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foodu64( uint64_t * pDataSrc, uint64_t * pDataDest)
+-{
+- const uint64x2_t vecOffs1 = { 0, 1};
+- const uint64x2_t vecOffs2 = { 2, 3};
+- uint32x4_t vecIn1 = vldrwq_z_u32 ((uint32_t const *) pDataSrc, __p);
+- uint32x4_t vecIn2 = vldrwq_z_u32 ((uint32_t const *) &pDataSrc[2], __p);
+-
+- vstrdq_scatter_shifted_offset_p_u64 (pDataDest, vecOffs1, (uint64x2_t) vecIn1, __p);
+- vstrdq_scatter_shifted_offset_p_u64 (pDataDest, vecOffs2, (uint64x2_t) vecIn2, __p);
+-
+- pDataDest[2] = pDataSrc[2];
+- return 0;
+-}
+-
+-int
+-foows32( int32_t * pDataSrc, int32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- int32x4_t vecIn1 = vldrwq_z_s32 ((int32_t const *) pDataSrc, __p);
+- int32x4_t vecIn2 = vldrwq_z_s32 ((int32_t const *) &pDataSrc[4], __p);
+- vstrwq_scatter_shifted_offset_p_s32 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrwq_scatter_shifted_offset_p_s32 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foohs16( int16_t * pDataSrc, int16_t * pDataDest)
+-{
+- const uint16x8_t vecOffs1 = { 0, 3, 6, 1, 4, 7, 2, 5};
+- const uint16x8_t vecOffs2 = { 9, 11, 13, 10, 12, 15, 8, 14};
+- int16x8_t vecIn1 = vldrhq_z_s16 ((int16_t const *) pDataSrc, __p);
+- int16x8_t vecIn2 = vldrhq_z_s16 ((int16_t const *) &pDataSrc[8], __p);
+- vstrhq_scatter_shifted_offset_p_s16 (pDataDest, vecOffs1, vecIn1, __p);
+- vstrhq_scatter_shifted_offset_p_s16 (pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[16] = pDataSrc[16];
+- return 0;
+-}
+-
+-int
+-foohs32( int32_t * pDataSrc, int32_t * pDataDest)
+-{
+- const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
+- const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
+- int32x4_t vecIn1 = vldrhq_z_s32 ((int16_t const *) pDataSrc, __p);
+- int32x4_t vecIn2 = vldrhq_z_s32 ((int16_t const *) &pDataSrc[4], __p);
+- vstrhq_scatter_shifted_offset_p_s32 ((int16_t *)pDataDest, vecOffs1, vecIn1, __p);
+- vstrhq_scatter_shifted_offset_p_s32 ((int16_t *)pDataDest, vecOffs2, vecIn2, __p);
+- pDataDest[8] = pDataSrc[8];
+- return 0;
+-}
+-
+-int
+-foods64( int64_t * pDataSrc, int64_t * pDataDest)
+-{
+- const uint64x2_t vecOffs1 = { 0, 1};
+- const uint64x2_t vecOffs2 = { 2, 3};
+- int32x4_t vecIn1 = vldrwq_z_s32 ((int32_t const *) pDataSrc, __p);
+- int32x4_t vecIn2 = vldrwq_z_s32 ((int32_t const *) &pDataSrc[2], __p);
+-
+- vstrdq_scatter_shifted_offset_p_s64 (pDataDest, vecOffs1, (int64x2_t) vecIn1, __p);
+- vstrdq_scatter_shifted_offset_p_s64 (pDataDest, vecOffs2, (int64x2_t) vecIn2, __p);
+-
+- pDataDest[2] = pDataSrc[2];
+- return 0;
+-}
+-
+-/* { dg-final { scan-assembler-times "vstr\[a-z\]t" 20 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqrshr.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqrshr.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** sqrshr (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32_t
+-sqrshr_reg (int32_t longval3, int32_t x)
++foo (int32_t value, int32_t shift)
+ {
+- return sqrshr (longval3, x);
++ return sqrshr (value, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "sqrshr\\tr\[0-9\]+, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqrshrl_sat48.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqrshrl_sat48.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** sqrshrl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #48, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64_t
+-sqrshrl_reg (int64_t longval3, int32_t x)
++foo (int64_t value, int32_t shift)
+ {
+- return sqrshrl_sat48 (longval3, x);
++ return sqrshrl_sat48 (value, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "sqrshrl\\tr\[0-9\]+, r\[0-9\]+, #48, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqshl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqshl.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** sqshl (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-sqshl_imm (int32_t longval3)
++foo (int32_t value)
+ {
+- return sqshl (longval3, 25);
++ return sqshl (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "sqshl\\tr\[0-9\]+, #25" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqshll.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/sqshll.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** sqshll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-sqshll_imm(int64_t value)
++foo (int64_t value)
+ {
+- return sqshll (value, 21);
++ return sqshll (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "sqshll\\tr\[0-9\]+, r\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/srshr.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/srshr.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** srshr (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
+ int32_t
+-srshr_imm (int32_t longval3)
++foo (int32_t value)
+ {
+- return srshr (longval3, 25);
++ return srshr (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "srshr\\tr\[0-9\]+, #25" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/srshrl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/srshrl.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** srshrl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
+ int64_t
+-srshrl_imm(int64_t value)
++foo (int64_t value)
+ {
+- return srshrl (value, 21);
++ return srshrl (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "srshrl\\tr\[0-9\]+, r\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqrshl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqrshl.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** uqrshl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32_t
+-uqrshl_reg (uint32_t longval3, int32_t x)
++foo (uint32_t value, int32_t shift)
+ {
+- return uqrshl (longval3, x);
++ return uqrshl (value, shift);
++}
++
++/*
++**foo1:
++** ...
++** uqrshl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32_t
++foo1 (int32_t shift)
++{
++ return uqrshl (1, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "uqrshl\\tr\[0-9\]+, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqrshll_sat48.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqrshll_sat48.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** uqrshll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #48, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint64_t
+-uqrshll_reg (uint64_t longval3, int32_t x)
++foo (uint64_t value, int32_t shift)
+ {
+- return uqrshll_sat48 (longval3, x);
++ return uqrshll_sat48 (value, shift);
++}
++
++/*
++**foo1:
++** ...
++** uqrshll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #48, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint64_t
++foo1 (int32_t shift)
++{
++ return uqrshll_sat48 (1, shift);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "uqrshll\\tr\[0-9\]+, r\[0-9\]+, #48, r\[0-9\]+" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqshl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqshl.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** uqshl (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
+ uint32_t
+-uqshl_imm (uint32_t longval3)
++foo (uint32_t value)
+ {
+- return uqshl (longval3, 21);
++ return uqshl (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "uqshl\\tr\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqshll.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/uqshll.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** uqshll (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
+ uint64_t
+-uqshll_imm(uint64_t value)
++foo (uint64_t value)
+ {
+- return uqshll (value, 21);
++ return uqshll (value, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "uqshll\\tr\[0-9\]+, r\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/urshr.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/urshr.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint64_t
+-urshr_imm (uint32_t longval3)
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** urshr (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
++uint32_t
++foo (uint32_t value)
++{
++ return urshr (value, 1);
++}
++
++/*
++**foo1:
++** ...
++** urshr (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
++uint32_t
++foo1 ()
+ {
+- return urshr (longval3, 21);
++ return urshr (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "urshr\\tr\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/urshrl.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/urshrl.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** urshrl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
+ uint64_t
+-urshrl_imm(uint64_t value)
++foo (uint64_t value)
+ {
+- return urshrl (value, 21);
++ return urshrl (value, 1);
++}
++
++/*
++**foo1:
++** ...
++** urshrl (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #1(?: @.*|)
++** ...
++*/
++uint64_t
++foo1 ()
++{
++ return urshrl (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "urshrl\\tr\[0-9\]+, r\[0-9\]+, #21" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s16.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vabavq_p_s16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int16x8_t b, int16x8_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vabavq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b, int32x4_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_s8.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vabavq_p_s8 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.s8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int8x16_t b, int8x16_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u16.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
+ {
+ return vabavq_p_u16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t b, uint16x8_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+ {
+ return vabavq_p_u32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_p_u8.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
+ {
+ return vabavq_p_u8 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
+ {
+ return vabavq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vabavt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabavt.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t b, uint8x16_t c, mve_pred16_t p)
++{
++ return vabavq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vabavq_s16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s16" } } */
+
++/*
++**foo1:
++** ...
++** vabav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s16" } } */
++/*
++**foo2:
++** ...
++** vabav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int16x8_t b, int16x8_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vabavq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s32" } } */
+
++/*
++**foo1:
++** ...
++** vabav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s32" } } */
++/*
++**foo2:
++** ...
++** vabav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b, int32x4_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vabavq_s8 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s8" } } */
+
++/*
++**foo1:
++** ...
++** vabav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.s8" } } */
++/*
++**foo2:
++** ...
++** vabav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int8x16_t b, int8x16_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint16x8_t b, uint16x8_t c)
+ {
+ return vabavq_u16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u16" } } */
+
++/*
++**foo1:
++** ...
++** vabav.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint16x8_t b, uint16x8_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u16" } } */
++/*
++**foo2:
++** ...
++** vabav.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t b, uint16x8_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b, uint32x4_t c)
+ {
+ return vabavq_u32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u32" } } */
+
++/*
++**foo1:
++** ...
++** vabav.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b, uint32x4_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u32" } } */
++/*
++**foo2:
++** ...
++** vabav.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t b, uint32x4_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabav.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint8x16_t b, uint8x16_t c)
+ {
+ return vabavq_u8 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u8" } } */
+
++/*
++**foo1:
++** ...
++** vabav.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint8x16_t b, uint8x16_t c)
+ {
+ return vabavq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vabav.u8" } } */
++/*
++**foo2:
++** ...
++** vabav.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t b, uint8x16_t c)
++{
++ return vabavq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vabdq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.f16" } } */
+
++/*
++**foo1:
++** ...
++** vabd.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vabdq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.f32" } } */
+
++/*
++**foo1:
++** ...
++** vabd.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vabdq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vabd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vabdq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vabd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vabdq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vabd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vabdq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vabd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vabdq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vabd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vabdq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vabd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vabdq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vabd.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabdt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabdt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vabdq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabs.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vabsq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.f16" } } */
++
++/*
++**foo1:
++** ...
++** vabs.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vabsq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabs.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vabsq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.f32" } } */
++
++/*
++**foo1:
++** ...
++** vabs.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vabsq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vabsq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabs.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vabsq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s16" } } */
+
++/*
++**foo1:
++** ...
++** vabs.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabs.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vabsq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s32" } } */
+
++/*
++**foo1:
++** ...
++** vabs.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vabs.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vabsq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s8" } } */
+
++/*
++**foo1:
++** ...
++** vabs.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vabs.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vabsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vabsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vabsq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vabst.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vabsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c
+@@ -1,23 +1,57 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vadciq_m_s32 (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadcit.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vadciq_m (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadcit.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c
+@@ -1,23 +1,57 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vadciq_m_u32 (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadcit.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vadciq_m (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadcit.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, unsigned * carry_out)
++foo (int32x4_t a, int32x4_t b, unsigned *carry_out)
+ {
+ return vadciq_s32 (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vadci.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, unsigned * carry_out)
++foo1 (int32x4_t a, int32x4_t b, unsigned *carry_out)
+ {
+ return vadciq (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vadci.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out)
++foo (uint32x4_t a, uint32x4_t b, unsigned *carry_out)
+ {
+ return vadciq_u32 (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vadci.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out)
++foo1 (uint32x4_t a, uint32x4_t b, unsigned *carry_out)
+ {
+ return vadciq (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vadci.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c
+@@ -1,23 +1,77 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p)
++foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+ return vadcq_m_s32 (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadct.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p)
++foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+ return vadcq_m (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadct.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c
+@@ -1,23 +1,77 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p)
++foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+ return vadcq_m_u32 (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadct.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vadct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p)
++foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+ return vadcq_m (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vadct.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c
+@@ -1,21 +1,69 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vadc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, unsigned * carry)
++foo (int32x4_t a, int32x4_t b, unsigned *carry)
+ {
+ return vadcq_s32 (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vadc.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vadc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, unsigned * carry)
++foo1 (int32x4_t a, int32x4_t b, unsigned *carry)
+ {
+ return vadcq (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vadc.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c
+@@ -1,21 +1,69 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vadc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, unsigned * carry)
++foo (uint32x4_t a, uint32x4_t b, unsigned *carry)
+ {
+ return vadcq_u32 (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vadc.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vadc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry)
++foo1 (uint32x4_t a, uint32x4_t b, unsigned *carry)
+ {
+ return vadcq (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vadc.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddlvaq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddlvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_p_u32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint64_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddlvaq_p_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint64_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddlvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvat.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t b, mve_pred16_t p)
++{
++ return vaddlvaq_p (1, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddlva.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b)
+ {
+ return vaddlvaq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddlva.s32" } } */
+
++/*
++**foo1:
++** ...
++** vaddlva.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b)
+ {
+ return vaddlvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddlva.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvaq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddlva.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint64_t a, uint32x4_t b)
+ {
+ return vaddlvaq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddlva.u32" } } */
+
++/*
++**foo1:
++** ...
++** vaddlva.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint64_t a, uint32x4_t b)
+ {
+ return vaddlvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddlva.u32" } } */
++/*
++**foo2:
++** ...
++** vaddlva.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t b)
++{
++ return vaddlvaq (1, b);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vaddlvq_p_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vaddlvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvt.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vaddlvq_p_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddlvt.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vaddlvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddlvt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddlv.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a)
+ {
+ return vaddlvq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddlv.s32" } } */
+
++/*
++**foo1:
++** ...
++** vaddlv.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a)
+ {
+- return vaddlvq_s32 (a);
++ return vaddlvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddlv.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddlvq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddlv.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint32x4_t a)
+ {
+- return vaddlvq_u32 (a);
++ return vaddlvq_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddlv.u32" } } */
+
++/*
++**foo1:
++** ...
++** vaddlv.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint32x4_t a)
+ {
+- return vaddlvq (a);
++ return vaddlvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddlv.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vaddq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vaddq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vaddq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
++{
++ return vaddq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vaddq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
++{
++ return vaddq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vaddq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_f16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vaddq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f16" } } */
++/*
++**foo2:
++** ...
++** vadd.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a)
++{
++ return vaddq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_f32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vaddq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.f32" } } */
++/*
++**foo2:
++** ...
++** vadd.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a)
++{
++ return vaddq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vaddq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vaddq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vaddq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vaddq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
++/*
++**foo2:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vaddq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
++/*
++**foo2:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vaddq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
++/*
++**foo2:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vaddq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vaddq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vaddq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vaddq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vaddq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vaddq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vadd.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vaddq_x (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vaddq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vaddq_x (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vaddq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vaddq_x_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t b, mve_pred16_t p)
++{
++ return vaddvaq_p (1, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t b, mve_pred16_t p)
++{
++ return vaddvaq_p (1, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vaddvaq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvat.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvat.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t b, mve_pred16_t p)
++{
++ return vaddvaq_p (1, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b)
+ {
+ return vaddvaq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s16" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b)
+ {
+ return vaddvaq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s32" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b)
+ {
+ return vaddvaq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s8" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint16x8_t b)
+ {
+ return vaddvaq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u16" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint16x8_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u16" } } */
++/*
++**foo2:
++** ...
++** vaddva.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t b)
++{
++ return vaddvaq (1, b);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b)
+ {
+ return vaddvaq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u32" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u32" } } */
++/*
++**foo2:
++** ...
++** vaddva.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t b)
++{
++ return vaddvaq (1, b);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddva.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint8x16_t b)
+ {
+ return vaddvaq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u8" } } */
+
++/*
++**foo1:
++** ...
++** vaddva.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint8x16_t b)
+ {
+ return vaddvaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vaddva.u8" } } */
++/*
++**foo2:
++** ...
++** vaddva.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t b)
++{
++ return vaddvaq (1, b);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vaddvq_p_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vaddvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vaddvq_p (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vaddvt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a)
+ {
+ return vaddvq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.s16" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a)
+ {
+- return vaddvq_s16 (a);
++ return vaddvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddv.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a)
+ {
+ return vaddvq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.s32" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a)
+ {
+- return vaddvq_s32 (a);
++ return vaddvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddv.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_s8.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a)
+ {
+ return vaddvq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.s8" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a)
+ {
+ return vaddvq (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint16x8_t a)
+ {
+- return vaddvq_u16 (a);
++ return vaddvq_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.u16" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint16x8_t a)
+ {
+- return vaddvq (a);
++ return vaddvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddv.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32x4_t a)
+ {
+- return vaddvq_u32 (a);
++ return vaddvq_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.u32" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32x4_t a)
+ {
+- return vaddvq (a);
++ return vaddvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddv.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vaddv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint8x16_t a)
+ {
+- return vaddvq_u8 (a);
++ return vaddvq_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vaddv.u8" } } */
+
++/*
++**foo1:
++** ...
++** vaddv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint8x16_t a)
+ {
+- return vaddvq (a);
++ return vaddvq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vaddv.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vandq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vandq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vandq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vandq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vandq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vandq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vandq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vandq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vandq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vandq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vandq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
+
++/*
++**foo1:
++** ...
++** vand q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vandq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vand" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vandq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vandq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vandt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vandt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vandq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_f16.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vbicq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_f32.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vbicq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vbicq_m_n_s16 (a, 16, p);
++ return vbicq_m_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+- return vbicq_m_n (a, 16, p);
++ return vbicq_m_n (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vbicq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_s16.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vbicq_n_s16 (a, 1);
+ }
+
++
++/*
++**foo1:
++** ...
++** vbic.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vbicq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler-times "vbic.i16" 2 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_s32.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vbicq_n_s32 (a, 1);
+ }
+
++
++/*
++**foo1:
++** ...
++** vbic.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vbicq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler-times "vbic.i32" 2 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_u16.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+ return vbicq_n_u16 (a, 1);
+ }
+
++
++/*
++**foo1:
++** ...
++** vbic.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+ return vbicq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler-times "vbic.i16" 2 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_n_u32.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+ return vbicq_n_u32 (a, 1);
+ }
+
++
++/*
++**foo1:
++** ...
++** vbic.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+ return vbicq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler-times "vbic.i32" 2 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vbicq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vbicq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vbicq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vbicq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vbicq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vbicq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
+
++/*
++**foo1:
++** ...
++** vbic q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vbicq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbic" } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbict" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbict q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vbicq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, int32_t b)
+ {
+ return vbrsrq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, int32_t b)
+ {
+ return vbrsrq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b)
+ {
+ return vbrsrq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vbrsrq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b)
+ {
+ return vbrsrq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.8" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b)
+ {
+ return vbrsrq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b)
+ {
+ return vbrsrq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vbrsr.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b)
+ {
+ return vbrsrq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.8" } } */
+
++/*
++**foo1:
++** ...
++** vbrsr.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b)
+ {
+ return vbrsrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vbrsr.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vbrsrt.8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vbrsrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vbrsrt.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcaddq_rot270_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcaddq_rot270_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcaddq_rot270_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcaddq_rot270_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcaddq_rot270_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcaddq_rot270_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcaddq_rot270_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcaddq_rot270_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcaddq_rot90_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcaddq_rot90_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcaddq_rot90_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcaddq_rot90_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcaddq_rot90_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcaddq_rot90_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcaddq_rot90_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcaddq_rot90_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcadd.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcadd.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcaddt.i8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vclsq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vclsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vclsq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vclsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vclsq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vclsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcls.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vclsq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcls.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vclsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcls.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vclsq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcls.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vclsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcls.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vclsq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcls.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vclsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vcls.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vclsq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vclsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vclsq_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vclsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vclsq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclst.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vclsq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_m_u32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vclzq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i16" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vclzq (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vclzq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i32" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vclzq (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vclzq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i8" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vclzq (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vclzq_u16 (a);
++ return vclzq_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i16" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vclzq (a);
++ return vclzq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vclz.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vclzq_u32 (a);
++ return vclzq_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i32" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+- return vclzq (a);
++ return vclzq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vclz.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vclz.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vclzq_u8 (a);
++ return vclzq_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vclz.i8" } } */
+
++/*
++**foo1:
++** ...
++** vclz.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vclzq (a);
++ return vclzq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vclz.i8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_x_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vclzt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vclzt.i8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vclzq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_f16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_f32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_m_f16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_m_f32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot180_f16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot180 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot180_f32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot180 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot180_m_f16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot180_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot180_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot180_m_f32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot180_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot270_f16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot270 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot270_f32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot270 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot270_m_f16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot270_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot270_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot270_m_f32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot270_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot90_f16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
+ {
+ return vcmlaq_rot90 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot90_f32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmla.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
+ {
+ return vcmlaq_rot90 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vcmla.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot90_m_f16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot90_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmlaq_rot90_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot90_m_f32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmlat.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+ {
+ return vcmlaq_rot90_m (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmlat.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vcmpcsq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vcmpcsq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vcmpcsq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpcsq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vcmpcsq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u16 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a)
++{
++ return vcmpcsq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vcmpcsq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u32 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a)
++{
++ return vcmpcsq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vcmpcsq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u8 cs, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a)
++{
++ return vcmpcsq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u16 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpcsq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u16 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u32 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpcsq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u32 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u8 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpcsq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u8 cs, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpcsq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpeqq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpeqq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpeqq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpeqq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpeqq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpeqq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vcmpeqq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vcmpeqq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vcmpeqq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpeqq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpeqq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpeqq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpeqq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpeqq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpeqq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpeqq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpeqq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpeqq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpeqq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vcmpeqq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i16 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a)
++{
++ return vcmpeqq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vcmpeqq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i32 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a)
++{
++ return vcmpeqq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vcmpeqq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i8 eq, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a)
++{
++ return vcmpeqq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpeqq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpeqq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpeqq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpeqq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpeqq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpeqq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 eq, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpeqq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpgeq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpgeq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpgeq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpgeq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpgeq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpgeq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpgeq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpgeq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpgeq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpgeq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpgeq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpgeq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpgeq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpgeq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpgeq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpgeq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 ge, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpgeq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpgeq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpgeq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 ge, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpgeq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpgtq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpgtq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpgtq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpgtq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpgtq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpgtq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpgtq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpgtq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpgtq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpgtq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpgtq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpgtq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpgtq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpgtq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpgtq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpgtq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 gt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpgtq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpgtq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpgtq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 gt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpgtq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vcmphiq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vcmphiq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vcmphiq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u16 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u32 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.u8 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmphiq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vcmphiq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u16 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a)
++{
++ return vcmphiq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vcmphiq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u32 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a)
++{
++ return vcmphiq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vcmphiq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.u8 hi, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a)
++{
++ return vcmphiq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u16 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmphiq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u16 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u32 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmphiq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u32 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.u8 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmphiq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.u8 hi, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmphiq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpleq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpleq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpleq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpleq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpleq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpleq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpleq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpleq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpleq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpleq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpleq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpleq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpleq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpleq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpleq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpleq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 le, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpleq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpleq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpleq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 le, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpleq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpltq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpltq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpltq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpltq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpltq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpltq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.s8 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpltq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpltq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpltq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpltq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpltq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpltq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpltq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpltq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpltq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpltq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 lt, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpltq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s16 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpltq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s32 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.s8 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpltq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.s8 lt, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmpneq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmpneq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_f16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_f32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vcmpneq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vcmpneq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vcmpneq_m (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vcmpneq_m (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vcmpneq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vcmpneq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c
+@@ -1,22 +1,71 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vcmpneq_m (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmpt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmpt.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vcmpneq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vcmpneq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vcmpneq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float16x8_t a)
++{
++ return vcmpneq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-mve_pred16_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vcmpneq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vcmpneq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.f32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.f32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (float32x4_t a)
++{
++ return vcmpneq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vcmpneq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vcmpneq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vcmpneq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vcmpneq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i16 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint16x8_t a)
++{
++ return vcmpneq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vcmpneq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i32 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint32x4_t a)
++{
++ return vcmpneq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c
+@@ -1,21 +1,59 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vcmpneq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++/*
++**foo2: { xfail *-*-* }
++** ...
++** vcmp.i8 ne, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
++mve_pred16_t
++foo2 (uint8x16_t a)
++{
++ return vcmpneq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vcmpneq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vcmpneq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vcmpneq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpneq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i16 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpneq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i32 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_u8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmp.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpneq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
+
++/*
++**foo1:
++** ...
++** vcmp.i8 ne, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vcmpneq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmp.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot180_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot180 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot180_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot180 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #180(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot180_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot270_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot270_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot90_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vcmulq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot90_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vcmul.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vcmulq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcmul.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f16 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vcmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcmult.f32 q[0-9]+, q[0-9]+, q[0-9]+, #0(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_f16.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ float16x8_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++float16x8_t
++foo1 ()
++{
++ return vcreateq_f16 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_f32.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ float32x4_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++float32x4_t
++foo1 ()
++{
++ return vcreateq_f32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s16.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ int16x8_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++int16x8_t
++foo1 ()
++{
++ return vcreateq_s16 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s32.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ int32x4_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++int32x4_t
++foo1 ()
++{
++ return vcreateq_s32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s64.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ int64x2_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_s64 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++int64x2_t
++foo1 ()
++{
++ return vcreateq_s64 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_s8.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ int8x16_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++int8x16_t
++foo1 ()
++{
++ return vcreateq_s8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u16.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ uint16x8_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++uint16x8_t
++foo1 ()
++{
++ return vcreateq_u16 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u32.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ uint32x4_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++uint32x4_t
++foo1 ()
++{
++ return vcreateq_u32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u64.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ uint64x2_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_u64 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++uint64x2_t
++foo1 ()
++{
++ return vcreateq_u64 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcreateq_u8.c
+@@ -1,13 +1,42 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r0, r2
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r1, r3
++** ...
++*/
+ uint8x16_t
+ foo (uint64_t a, uint64_t b)
+ {
+ return vcreateq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmov" } } */
++/*
++**foo1:
++** ...
++** vmov q[0-9+]\[2\], q[0-9+]\[0\], r[0-9+], r[0-9+]
++** vmov q[0-9+]\[3\], q[0-9+]\[1\], r[0-9+], r[0-9+]
++** ...
++*/
++uint8x16_t
++foo1 ()
++{
++ return vcreateq_u8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp16q.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp16q.c
+@@ -1,21 +1,44 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vctp.16 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a)
+ {
+ return vctp16q (a);
+ }
+
+-/* { dg-final { scan-assembler "vctp.16" } } */
+-
++/*
++**foo1:
++** ...
++** vctp.16 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a)
++foo1 ()
+ {
+- return vctp16q (a);
++ return vctp16q (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vctp.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp16q_m.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp16q_m.c
+@@ -1,22 +1,52 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.16 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vctp16q_m (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vctpt.16" } } */
+-
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.16 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a, mve_pred16_t p)
++foo1 (mve_pred16_t p)
+ {
+- return vctp16q_m (a, p);
++ return vctp16q_m (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp32q.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp32q.c
+@@ -1,21 +1,44 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vctp.32 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a)
+ {
+ return vctp32q (a);
+ }
+
+-/* { dg-final { scan-assembler "vctp.32" } } */
+-
++/*
++**foo1:
++** ...
++** vctp.32 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a)
++foo1 ()
+ {
+- return vctp32q (a);
++ return vctp32q (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vctp.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp32q_m.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp32q_m.c
+@@ -1,22 +1,52 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.32 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vctp32q_m (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vctpt.32" } } */
+-
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.32 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a, mve_pred16_t p)
++foo1 (mve_pred16_t p)
+ {
+- return vctp32q_m (a, p);
++ return vctp32q_m (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp64q.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp64q.c
+@@ -1,21 +1,44 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vctp.64 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a)
+ {
+ return vctp64q (a);
+ }
+
+-/* { dg-final { scan-assembler "vctp.64" } } */
+-
++/*
++**foo1:
++** ...
++** vctp.64 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a)
++foo1 ()
+ {
+- return vctp64q (a);
++ return vctp64q (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vctp.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp64q_m.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp64q_m.c
+@@ -1,22 +1,52 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.64 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vctp64q_m (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vctpt.64" } } */
+-
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.64 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a, mve_pred16_t p)
++foo1 (mve_pred16_t p)
+ {
+- return vctp64q_m (a, p);
++ return vctp64q_m (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp8q.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp8q.c
+@@ -1,21 +1,44 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vctp.8 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a)
+ {
+ return vctp8q (a);
+ }
+
+-/* { dg-final { scan-assembler "vctp.8" } } */
+-
++/*
++**foo1:
++** ...
++** vctp.8 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a)
++foo1 ()
+ {
+- return vctp8q (a);
++ return vctp8q (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vctp.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp8q_m.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vctp8q_m.c
+@@ -1,22 +1,52 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.8 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vctp8q_m (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vctpt.8" } } */
+-
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vctpt.8 (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+-foo1 (uint32_t a, mve_pred16_t p)
++foo1 (mve_pred16_t p)
+ {
+- return vctp8q_m (a, p);
++ return vctp8q_m (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m_s16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m_s32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m_u16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m_u32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvta.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtaq_s16_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvta.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvta.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtaq_s32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvta.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvta.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+- return vcvtaq_u16_f16 (a);
++ return vcvtaq_u16_f16 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvta.u16.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvta.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+- return vcvtaq_u32_f32 (a);
++ return vcvtaq_u32_f32 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvta.u32.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_x_s16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_x_s32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtaq_x_u16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtat.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtaq_x_u32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtat.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_f16_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_f16_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtb.f16.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float32x4_t b)
+ {
+ return vcvtbq_f16_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcvtb.f16.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_f32_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtb.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float16x8_t a)
+ {
+ return vcvtbq_f32_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtb.f32.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_m_f16_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_m_f16_f32.c
+@@ -1,22 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtbt.f16.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcvtbq_m_f16_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtbt.f16.f32" } } */
+-
+-float16x8_t
+-foo1 (float16x8_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vcvtbq_m (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_m_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_m_f32_f16.c
+@@ -1,22 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtbt.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtbq_m_f32_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtbt.f32.f16" } } */
+-
+-float32x4_t
+-foo1 (float32x4_t inactive, float16x8_t a, mve_pred16_t p)
+-{
+- return vcvtbq_m (inactive, a, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtbt.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtbq_x_f32_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtbt.f32.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_s16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m_s16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_s32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m_s32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_u16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m_u16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_m_u32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m_u32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtm.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtmq_s16_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtm.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtm.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtmq_s32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtm.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtm.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+- return vcvtmq_u16_f16 (a);
++ return vcvtmq_u16_f16 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvtm.u16.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtm.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+- return vcvtmq_u32_f32 (a);
++ return vcvtmq_u32_f32 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvtm.u32.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_x_s16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_x_s32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtmq_x_u16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtmt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtmq_x_u32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtmt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_s16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m_s16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_s32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m_s32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_u16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m_u16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_m_u32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m_u32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtn.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtnq_s16_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtn.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtn.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtnq_s32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtn.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtn.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+- return vcvtnq_u16_f16 (a);
++ return vcvtnq_u16_f16 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvtn.u16.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtn.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtnq_u32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtn.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_x_s16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_x_s32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtnq_x_u16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtnt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtnq_x_u32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtnt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_s16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m_s16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_s32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m_s32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_u16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m_u16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_m_u32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m_u32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtp.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtpq_s16_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtp.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtp.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtpq_s32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtp.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtp.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+- return vcvtpq_u16_f16 (a);
++ return vcvtpq_u16_f16 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvtp.u16.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtp.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+- return vcvtpq_u32_f32 (a);
++ return vcvtpq_u32_f32 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvtp.u32.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_x_s16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_x_s32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtpq_x_u16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtpt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtpq_x_u32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtpt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f16_s16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (int16x8_t a)
+ {
+ return vcvtq_f16_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.s16" } } */
++
++/*
++**foo1:
++** ...
++** vcvt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (int16x8_t a)
++{
++ return vcvtq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f16_u16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (uint16x8_t a)
+ {
+ return vcvtq_f16_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.u16" } } */
++
++/*
++**foo1:
++** ...
++** vcvt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (uint16x8_t a)
++{
++ return vcvtq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f32_s32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (int32x4_t a)
+ {
+ return vcvtq_f32_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.s32" } } */
++
++/*
++**foo1:
++** ...
++** vcvt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (int32x4_t a)
++{
++ return vcvtq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_f32_u32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t a)
+ {
+ return vcvtq_f32_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.u32" } } */
++
++/*
++**foo1:
++** ...
++** vcvt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (uint32x4_t a)
++{
++ return vcvtq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_f16_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_f16_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_f32_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_f32_u32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_f16_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_f16_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_f32_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vcvtq_m_n_f32_u32 (inactive, a, 16, p);
++ return vcvtq_m_n_f32_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vcvtq_m_n (inactive, a, 16, p);
++ return vcvtq_m_n (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_s16_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_s16_f16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_s32_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_s32_f32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_u16_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_u16_f16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_n_u32_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n_u32_f32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_s16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_s16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_s32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_s32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_u16_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_u16_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_u32_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m_u32_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f16_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (int16x8_t a)
+ {
+ return vcvtq_n_f16_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.s16" } } */
+
++/*
++**foo1:
++** ...
++** vcvt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vcvtq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f16_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (uint16x8_t a)
+ {
+ return vcvtq_n_f16_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.u16" } } */
+
++/*
++**foo1:
++** ...
++** vcvt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (uint16x8_t a)
+ {
+ return vcvtq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f16.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f32_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (int32x4_t a)
+ {
+ return vcvtq_n_f32_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.s32" } } */
+
++/*
++**foo1:
++** ...
++** vcvt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vcvtq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_f32_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t a)
+ {
+ return vcvtq_n_f32_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.u32" } } */
+
++/*
++**foo1:
++** ...
++** vcvt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (uint32x4_t a)
+ {
+ return vcvtq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.f32.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.s16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtq_n_s16_f16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.s32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtq_n_s32_f32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.u16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtq_n_u16_f16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_n_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.u32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtq_n_u32_f32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_s16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a)
+ {
+ return vcvtq_s16_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_s32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a)
+ {
+ return vcvtq_s32_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_u16_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a)
+ {
+- return vcvtq_u16_f16 (a);
++ return vcvtq_u16_f16 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvt.u16.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_u32_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a)
+ {
+- return vcvtq_u32_f32 (a);
++ return vcvtq_u32_f32 (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vcvt.u32.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_f16_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_f16_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_f32_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_f32_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_f16_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_f16_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f16.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_f32_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+- return vcvtq_x_n_f32_u32 (a, 16, p);
++ return vcvtq_x_n_f32_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.f32.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+- return vcvtq_x_n (a, 16, p);
++ return vcvtq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_s16_f16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_s32_f32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_u16_f16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_n_u32_f32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_s16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.s32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_s32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u16.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_u16_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvtt.u32.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vcvtq_x_u32_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_f16_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_f16_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtt.f16.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float32x4_t b)
+ {
+ return vcvttq_f16_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vcvtt.f16.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_f32_f16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vcvtt.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float16x8_t a)
+ {
+ return vcvttq_f32_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vcvtt.f32.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_m_f16_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_m_f16_f32.c
+@@ -1,22 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvttt.f16.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vcvttq_m_f16_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvttt.f16.f32" } } */
+-
+-float16x8_t
+-foo1 (float16x8_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vcvttq_m (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_m_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_m_f32_f16.c
+@@ -1,22 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvttt.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vcvttq_m_f32_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvttt.f32.f16" } } */
+-
+-float32x4_t
+-foo1 (float32x4_t inactive, float16x8_t a, mve_pred16_t p)
+-{
+- return vcvttq_m (inactive, a, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vcvttt.f32.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vcvttq_x_f32_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vcvttt.f32.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vddupq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vddupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_m_n_u32 (inactive, a, 4, p);
++ return vddupq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_m (inactive, a, 4, p);
++ return vddupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_m_n_u8 (inactive, a, 4, p);
++ return vddupq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_m (inactive, a, 4, p);
++ return vddupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vddupq_m_wb_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vddupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_m_wb_u32 (inactive, a, 4, p);
++ return vddupq_m_wb_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_m (inactive, a, 4, p);
++ return vddupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_m_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_m_wb_u8 (inactive, a, 4, p);
++ return vddupq_m_wb_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_m (inactive, a, 4, p);
++ return vddupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vddupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a)
+ {
+- return vddupq_n_u16 (a, 4);
++ return vddupq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a)
+ {
+- return vddupq_u16 (a, 4);
++ return vddupq_u16 (a, 1);
++}
++
++/*
++**foo2:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vddupq_u16 (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vddup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a)
+ {
+ return vddupq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a)
+ {
+ return vddupq_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u32" } } */
++/*
++**foo2:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vddupq_u32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a)
+ {
+ return vddupq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a)
+ {
+ return vddupq_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u8" } } */
++/*
++**foo2:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vddupq_u8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t *a)
+ {
+- return vddupq_wb_u16 (a, 4);
++ return vddupq_wb_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t *a)
+ {
+- return vddupq_u16 (a, 4);
++ return vddupq_u16 (a, 1);
++}
++
++/*
++**foo2:
++** ...
++** vddup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vddupq_u16 (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vddup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t *a)
+ {
+ return vddupq_wb_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t *a)
+ {
+ return vddupq_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u32" } } */
++/*
++**foo2:
++** ...
++** vddup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vddupq_u32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_wb_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t *a)
+ {
+ return vddupq_wb_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t *a)
+ {
+ return vddupq_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vddup.u8" } } */
++/*
++**foo2:
++** ...
++** vddup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vddupq_u8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vddupq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+ return vddupq_x_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u16 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_x_n_u32 (a, 4, p);
++ return vddupq_x_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_x_u32 (a, 4, p);
++ return vddupq_x_u32 (a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u32 (1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_x_n_u8 (a, 4, p);
++ return vddupq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+- return vddupq_x_u8 (a, 4, p);
++ return vddupq_x_u8 (a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u8 (1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t *a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_wb_u16 (a, 2, p);
++ return vddupq_x_wb_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_u16 (a, 2, p);
++ return vddupq_x_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u16 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t *a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_wb_u32 (a, 8, p);
++ return vddupq_x_wb_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_u32 (a, 8, p);
++ return vddupq_x_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u32 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t *a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_wb_u8 (a, 8, p);
++ return vddupq_x_wb_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vddupq_x_u8 (a, 8, p);
++ return vddupq_x_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vddupt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vddupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vddupq_x_u8 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_f16.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t inactive, mve_pred16_t p)
++{
++ return vdupq_m (inactive, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_f32.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t inactive, mve_pred16_t p)
++{
++ return vdupq_m (inactive, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vdupq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_u32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vdupq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8_t a, mve_pred16_t p)
+ {
+ return vdupq_m_n_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8_t a, mve_pred16_t p)
+ {
+ return vdupq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vdupq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_f16.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16_t a)
+ {
+ return vdupq_n_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vdup.16" } } */
++/*
++**foo1:
++** ...
++** vdup.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 ()
++{
++ return vdupq_n_f16 (1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_f32.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32_t a)
+ {
+ return vdupq_n_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vdup.32" } } */
++/*
++**foo1:
++** ...
++** vdup.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 ()
++{
++ return vdupq_n_f32 (1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s16.c
+@@ -1,13 +1,28 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16_t a)
+ {
+ return vdupq_n_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vdup.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s32.c
+@@ -1,13 +1,28 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32_t a)
+ {
+ return vdupq_n_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vdup.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_s8.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8_t a)
+ {
+ return vdupq_n_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vdup.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u16.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16_t a)
+ {
+- return vdupq_n_u16 (a);
++ return vdupq_n_u16 (a);
++}
++
++/*
++**foo1:
++** ...
++** vdup.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 ()
++{
++ return vdupq_n_u16 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdup.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u32.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a)
+ {
+- return vdupq_n_u32 (a);
++ return vdupq_n_u32 (a);
++}
++
++/*
++**foo1:
++** ...
++** vdup.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 ()
++{
++ return vdupq_n_u32 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdup.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_n_u8.c
+@@ -1,13 +1,40 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdup.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8_t a)
+ {
+- return vdupq_n_u8 (a);
++ return vdupq_n_u8 (a);
++}
++
++/*
++**foo1:
++** ...
++** vdup.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 ()
++{
++ return vdupq_n_u8 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdup.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c
+@@ -1,14 +1,48 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (mve_pred16_t p)
++{
++ return vdupq_x_n_f16 (1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c
+@@ -1,14 +1,48 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (mve_pred16_t p)
++{
++ return vdupq_x_n_f32 (1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c
+@@ -1,14 +1,48 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.16" } } */
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (mve_pred16_t p)
++{
++ return vdupq_x_n_u16 (1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c
+@@ -1,14 +1,48 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.32" } } */
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (mve_pred16_t p)
++{
++ return vdupq_x_n_u32 (1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c
+@@ -1,14 +1,48 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8_t a, mve_pred16_t p)
+ {
+ return vdupq_x_n_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdupt.8" } } */
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdupt.8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (mve_pred16_t p)
++{
++ return vdupq_x_n_u8 (1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 1, p);
++ return vdwdupq_m_n_u16 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_m (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 4, p);
++ return vdwdupq_m_n_u32 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 4, p);
++ return vdwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 4, p);
++ return vdwdupq_m_n_u8 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 4, p);
++ return vdwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint16x8_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 8, p);
++ return vdwdupq_m_wb_u16 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint16x8_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 8, p);
++ return vdwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32x4_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 1, p);
++ return vdwdupq_m_wb_u32 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32x4_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_m (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_m_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint8x16_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 2, p);
++ return vdwdupq_m_wb_u8 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t inactive, uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint8x16_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_m (inactive, a, b, 2, p);
++ return vdwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vdwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_n_u16 (a, b, 2);
++ return vdwdupq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_u16 (a, b, 2);
++ return vdwdupq_u16 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vdwdupq_u16 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_n_u32 (a, b, 8);
++ return vdwdupq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_u32 (a, b, 8);
++ return vdwdupq_u32 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vdwdupq_u32 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_n_u8 (a, b, 4);
++ return vdwdupq_n_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+- return vdwdupq_u8 (a, b, 4);
++ return vdwdupq_u8 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vdwdupq_u8 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_wb_u16 (a, b, 2);
++ return vdwdupq_wb_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_u16 (a, b, 2);
++ return vdwdupq_u16 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vdwdupq_u16 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_wb_u32 (a, b, 8);
++ return vdwdupq_wb_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_u32 (a, b, 8);
++ return vdwdupq_u32 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vdwdupq_u32 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_wb_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_wb_u8 (a, b, 4);
++ return vdwdupq_wb_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vdwdup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t *a, uint32_t b)
+ {
+- return vdwdupq_u8 (a, b, 4);
++ return vdwdupq_u8 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** vdwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vdwdupq_u8 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vdwdup.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_x_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_x_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u16 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_n_u32 (a, b, 4, p);
++ return vdwdupq_x_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_u32 (a, b, 4, p);
++ return vdwdupq_x_u32 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u32 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_n_u8 (a, b, 4, p);
++ return vdwdupq_x_n_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_u8 (a, b, 4, p);
++ return vdwdupq_x_u8 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u8 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_wb_u16 (a, b, 8, p);
++ return vdwdupq_x_wb_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_u16 (a, b, 8, p);
++ return vdwdupq_x_u16 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u16 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_x_wb_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+ return vdwdupq_x_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u32 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_wb_u8 (a, b, 2, p);
++ return vdwdupq_x_wb_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return vdwdupq_x_u8 (a, b, 2, p);
++ return vdwdupq_x_u8 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vdwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vdwdupq_x_u8 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vdwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return veorq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return veorq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return veorq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return veorq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return veorq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return veorq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return veorq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return veorq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return veorq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return veorq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return veorq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
+
++/*
++**foo1:
++** ...
++** veor q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return veorq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "veor" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return veorq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return veorq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** veort q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return veorq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "veort" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfma.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16x8_t c)
++foo (float16x8_t add, float16x8_t m1, float16x8_t m2)
+ {
+- return vfmaq_f16 (a, b, c);
++ return vfmaq_f16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfma.f16" } } */
+
++/*
++**foo1:
++** ...
++** vfma.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
++foo1 (float16x8_t add, float16x8_t m1, float16x8_t m2)
+ {
+- return vfmaq (a, b, c);
++ return vfmaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfma.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfma.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32x4_t c)
++foo (float32x4_t add, float32x4_t m1, float32x4_t m2)
+ {
+- return vfmaq_f32 (a, b, c);
++ return vfmaq_f32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfma.f32" } } */
+
++/*
++**foo1:
++** ...
++** vfma.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
++foo1 (float32x4_t add, float32x4_t m1, float32x4_t m2)
+ {
+- return vfmaq (a, b, c);
++ return vfmaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfma.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
++foo (float16x8_t add, float16x8_t m1, float16x8_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m_f16 (a, b, c, p);
++ return vfmaq_m_f16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
++foo1 (float16x8_t add, float16x8_t m1, float16x8_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m (a, b, c, p);
++ return vfmaq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
++foo (float32x4_t add, float32x4_t m1, float32x4_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m_f32 (a, b, c, p);
++ return vfmaq_m_f32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
++foo1 (float32x4_t add, float32x4_t m1, float32x4_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m (a, b, c, p);
++ return vfmaq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
+-{
+- return vfmaq_m (a, b, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
++foo (float16x8_t add, float16x8_t m1, float16_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m_n_f16 (a, b, c, p);
++ return vfmaq_m_n_f16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
++foo1 (float16x8_t add, float16x8_t m1, float16_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m (a, b, c, p);
++ return vfmaq_m (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t add, float16x8_t m1, mve_pred16_t p)
++{
++ return vfmaq_m (add, m1, 1.1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
+-{
+- return vfmaq_m (a, b, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_m_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
++foo (float32x4_t add, float32x4_t m1, float32_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m_n_f32 (a, b, c, p);
++ return vfmaq_m_n_f32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
++foo1 (float32x4_t add, float32x4_t m1, float32_t m2, mve_pred16_t p)
+ {
+- return vfmaq_m (a, b, c, p);
++ return vfmaq_m (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmat.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t add, float32x4_t m1, mve_pred16_t p)
++{
++ return vfmaq_m (add, m1, 1.1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmat.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c)
+-{
+- return vfmaq (a, b, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfma.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16_t c)
++foo (float16x8_t add, float16x8_t m1, float16_t m2)
+ {
+- return vfmaq_n_f16 (a, b, c);
++ return vfmaq_n_f16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfma.f16" } } */
+
++/*
++**foo1:
++** ...
++** vfma.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c)
++foo1 (float16x8_t add, float16x8_t m1, float16_t m2)
+ {
+- return vfmaq (a, b, c);
++ return vfmaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vfma.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t add, float16x8_t m1)
++{
++ return vfmaq (add, m1, 1.1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfma.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c)
+-{
+- return vfmaq (a, b, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfma.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32_t c)
++foo (float32x4_t add, float32x4_t m1, float32_t m2)
+ {
+- return vfmaq_n_f32 (a, b, c);
++ return vfmaq_n_f32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfma.f32" } } */
+
++/*
++**foo1:
++** ...
++** vfma.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c)
++foo1 (float32x4_t add, float32x4_t m1, float32_t m2)
+ {
+- return vfmaq (a, b, c);
++ return vfmaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vfma.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t add, float32x4_t m1)
++{
++ return vfmaq (add, m1, 1.1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfma.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
+-{
+- return vfmasq_m (a, b, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
++foo (float16x8_t m1, float16x8_t m2, float16_t add, mve_pred16_t p)
+ {
+- return vfmasq_m_n_f16 (a, b, c, p);
++ return vfmasq_m_n_f16 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmast.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p)
++foo1 (float16x8_t m1, float16x8_t m2, float16_t add, mve_pred16_t p)
+ {
+- return vfmasq_m (a, b, c, p);
++ return vfmasq_m (m1, m2, add, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t m1, float16x8_t m2, mve_pred16_t p)
++{
++ return vfmasq_m (m1, m2, 1.1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmast.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
+-{
+- return vfmasq_m (a, b, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_m_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
++foo (float32x4_t m1, float32x4_t m2, float32_t add, mve_pred16_t p)
+ {
+- return vfmasq_m_n_f32 (a, b, c, p);
++ return vfmasq_m_n_f32 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmast.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p)
++foo1 (float32x4_t m1, float32x4_t m2, float32_t add, mve_pred16_t p)
+ {
+- return vfmasq_m (a, b, c, p);
++ return vfmasq_m (m1, m2, add, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmast.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t m1, float32x4_t m2, mve_pred16_t p)
++{
++ return vfmasq_m (m1, m2, 1.1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmast.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c)
+-{
+- return vfmasq (a, b, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfmas.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16_t c)
++foo (float16x8_t m1, float16x8_t m2, float16_t add)
+ {
+- return vfmasq_n_f16 (a, b, c);
++ return vfmasq_n_f16 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vfmas.f16" } } */
+
++/*
++**foo1:
++** ...
++** vfmas.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16_t c)
++foo1 (float16x8_t m1, float16x8_t m2, float16_t add)
+ {
+- return vfmasq (a, b, c);
++ return vfmasq (m1, m2, add);
++}
++
++/*
++**foo2:
++** ...
++** vfmas.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t m1, float16x8_t m2)
++{
++ return vfmasq (m1, m2, 1.1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfmas.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c)
+-{
+- return vfmasq (a, b, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfmas.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32_t c)
++foo (float32x4_t m1, float32x4_t m2, float32_t add)
+ {
+- return vfmasq_n_f32 (a, b, c);
++ return vfmasq_n_f32 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vfmas.f32" } } */
+
++/*
++**foo1:
++** ...
++** vfmas.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32_t c)
++foo1 (float32x4_t m1, float32x4_t m2, float32_t add)
+ {
+- return vfmasq (a, b, c);
++ return vfmasq (m1, m2, add);
++}
++
++/*
++**foo2:
++** ...
++** vfmas.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t m1, float32x4_t m2)
++{
++ return vfmasq (m1, m2, 1.1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfmas.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfms.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16x8_t c)
++foo (float16x8_t add, float16x8_t m1, float16x8_t m2)
+ {
+- return vfmsq_f16 (a, b, c);
++ return vfmsq_f16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfms.f16" } } */
+
++/*
++**foo1:
++** ...
++** vfms.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16x8_t c)
++foo1 (float16x8_t add, float16x8_t m1, float16x8_t m2)
+ {
+- return vfmsq (a, b, c);
++ return vfmsq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfms.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vfms.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32x4_t c)
++foo (float32x4_t add, float32x4_t m1, float32x4_t m2)
+ {
+- return vfmsq_f32 (a, b, c);
++ return vfmsq_f32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vfms.f32" } } */
+
++/*
++**foo1:
++** ...
++** vfms.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32x4_t c)
++foo1 (float32x4_t add, float32x4_t m1, float32x4_t m2)
+ {
+- return vfmsq (a, b, c);
++ return vfmsq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vfms.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmst.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
++foo (float16x8_t add, float16x8_t m1, float16x8_t m2, mve_pred16_t p)
+ {
+- return vfmsq_m_f16 (a, b, c, p);
++ return vfmsq_m_f16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmst.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmst.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
++foo1 (float16x8_t add, float16x8_t m1, float16x8_t m2, mve_pred16_t p)
+ {
+- return vfmsq_m (a, b, c, p);
++ return vfmsq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmst.f16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmsq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmst.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
++foo (float32x4_t add, float32x4_t m1, float32x4_t m2, mve_pred16_t p)
+ {
+- return vfmsq_m_f32 (a, b, c, p);
++ return vfmsq_m_f32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmst.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vfmst.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
++foo1 (float32x4_t add, float32x4_t m1, float32x4_t m2, mve_pred16_t p)
+ {
+- return vfmsq_m (a, b, c, p);
++ return vfmsq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vfmst.f32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vhaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vhaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vhaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vhaddq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vhaddq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vhaddq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vhaddq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u16" } } */
++/*
++**foo2:
++** ...
++** vhadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vhaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vhaddq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u32" } } */
++/*
++**foo2:
++** ...
++** vhadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vhaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vhaddq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u8" } } */
++/*
++**foo2:
++** ...
++** vhadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vhaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vhaddq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vhaddq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vhaddq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vhaddq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vhaddq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vhaddq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vhadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhadd.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vhaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vhaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vhaddq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vhcaddq_rot270_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vhcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vhcaddq_rot270_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vhcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vhcaddq_rot270_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vhcaddq_rot270 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #270(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot270_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vhcaddq_rot90_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vhcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vhcaddq_rot90_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vhcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhcadd.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vhcaddq_rot90_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhcadd.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vhcaddq_rot90 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhcadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhcaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+, #90(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhcaddq_rot90_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vhsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vhsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vhsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vhsubq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vhsubq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vhsubq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vhsubq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u16" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u16" } } */
++/*
++**foo2:
++** ...
++** vhsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vhsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vhsubq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u32" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u32" } } */
++/*
++**foo2:
++** ...
++** vhsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vhsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vhsubq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u8" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u8" } } */
++/*
++**foo2:
++** ...
++** vhsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vhsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vhsubq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s16" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vhsubq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s32" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vhsubq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s8" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vhsubq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u16" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vhsubq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u32" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vhsub.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vhsubq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u8" } } */
+
++/*
++**foo1:
++** ...
++** vhsub.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vhsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vhsub.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vhsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vhsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vhsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vhsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vhsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vidupq_m_n_u16 (inactive, a, 4, p);
++ return vidupq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t a, mve_pred16_t p)
+ {
+- return vidupq_m (inactive, a, 4, p);
++ return vidupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_m_wb_u16 (inactive, a, 4, p);
++ return vidupq_m_wb_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_m (inactive, a, 4, p);
++ return vidupq_m (inactive, a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vidupq_m_wb_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vidupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_m_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vidupq_m_wb_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t *a, mve_pred16_t p)
+ {
+ return vidupq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return vidupq_m (inactive, 1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a)
+ {
+- return vidupq_n_u16 (a, 4);
++ return vidupq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a)
+ {
+- return vidupq_u16 (a, 4);
++ return vidupq_u16 (a, 1);
++}
++
++/*
++**foo2:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vidupq_u16 (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vidup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a)
+ {
+ return vidupq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a)
+ {
+ return vidupq_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u32" } } */
++/*
++**foo2:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vidupq_u32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a)
+ {
+ return vidupq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a)
+ {
+ return vidupq_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u8" } } */
++/*
++**foo2:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vidupq_u8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t *a)
+ {
+- return vidupq_wb_u16 (a, 4);
++ return vidupq_wb_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u16" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t *a)
+ {
+- return vidupq_u16 (a, 4);
++ return vidupq_u16 (a, 1);
++}
++
++/*
++**foo2:
++** ...
++** vidup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return vidupq_u16 (1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vidup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t *a)
+ {
+ return vidupq_wb_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u32" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t *a)
+ {
+ return vidupq_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u32" } } */
++/*
++**foo2:
++** ...
++** vidup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return vidupq_u32 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_wb_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t *a)
+ {
+ return vidupq_wb_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u8" } } */
+
++/*
++**foo1:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t *a)
+ {
+ return vidupq_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vidup.u8" } } */
++/*
++**foo2:
++** ...
++** vidup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return vidupq_u8 (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+- return vidupq_x_n_u16 (a, 4, p);
++ return vidupq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+- return vidupq_x_u16 (a, 4, p);
++ return vidupq_x_u16 (a, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u16 (1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_x_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_x_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u32 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, mve_pred16_t p)
+ {
+ return vidupq_x_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u8 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t *a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_wb_u16 (a, 8, p);
++ return vidupq_x_wb_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_u16 (a, 8, p);
++ return vidupq_x_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u16 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t *a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_wb_u32 (a, 2, p);
++ return vidupq_x_wb_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_u32 (a, 2, p);
++ return vidupq_x_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u32 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c
+@@ -1,25 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-uint32_t * a;
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (mve_pred16_t p)
++foo (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_wb_u8 (a, 2, p);
++ return vidupq_x_wb_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (mve_pred16_t p)
++foo1 (uint32_t *a, mve_pred16_t p)
+ {
+- return vidupq_x_u8 (a, 2, p);
++ return vidupq_x_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vidupt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vidupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return vidupq_x_u8 (1, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_n_u16 (inactive, a, b, 2, p);
++ return viwdupq_m_n_u16 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 2, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_n_u32 (inactive, a, b, 4, p);
++ return viwdupq_m_n_u32 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 4, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_n_u8 (inactive, a, b, 8, p);
++ return viwdupq_m_n_u8 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 8, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_wb_u16 (inactive, a, b, 2, p);
++ return viwdupq_m_wb_u16 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 2, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_wb_u32 (inactive, a, b, 4, p);
++ return viwdupq_m_wb_u32 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 4, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_m_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m_wb_u8 (inactive, a, b, 8, p);
++ return viwdupq_m_wb_u8 (inactive, a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_m (inactive, a, b, 8, p);
++ return viwdupq_m (inactive, a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, mve_pred16_t p)
++{
++ return viwdupq_m (inactive, 1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, uint32_t b)
+ {
+- return viwdupq_n_u16 (a, b, 2);
++ return viwdupq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u16" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+- return viwdupq_u16 (a, b, 2);
++ return viwdupq_u16 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return viwdupq_u16 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "viwdup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, uint32_t b)
+ {
+- return viwdupq_n_u32 (a, b, 4);
++ return viwdupq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u32" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+- return viwdupq_u32 (a, b, 4);
++ return viwdupq_u32 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return viwdupq_u32 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "viwdup.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, uint32_t b)
+ {
+ return viwdupq_n_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u8" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, uint32_t b)
+ {
+ return viwdupq_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u8" } } */
++/*
++**foo2:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return viwdupq_u8 (1, 1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint32_t * a, uint32_t b)
++foo (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_wb_u16 (a, b, 4);
++ return viwdupq_wb_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u16" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint32_t * a, uint32_t b)
++foo1 (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_u16 (a, b, 4);
++ return viwdupq_u16 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** viwdup.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 ()
++{
++ return viwdupq_u16 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "viwdup.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t * a, uint32_t b)
++foo (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_wb_u32 (a, b, 8);
++ return viwdupq_wb_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u32" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t * a, uint32_t b)
++foo1 (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_u32 (a, b, 8);
++ return viwdupq_u32 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** viwdup.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 ()
++{
++ return viwdupq_u32 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "viwdup.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_wb_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint32_t * a, uint32_t b)
++foo (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_wb_u8 (a, b, 2);
++ return viwdupq_wb_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "viwdup.u8" } } */
+
++/*
++**foo1:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint32_t * a, uint32_t b)
++foo1 (uint32_t *a, uint32_t b)
+ {
+- return viwdupq_u8 (a, b, 2);
++ return viwdupq_u8 (a, b, 1);
++}
++
++/*
++**foo2:
++** ...
++** viwdup.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 ()
++{
++ return viwdupq_u8 (1, 1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "viwdup.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_n_u16 (a, b, 2, p);
++ return viwdupq_x_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u16 (a, b, 2, p);
++ return viwdupq_x_u16 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u16 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_n_u32 (a, b, 4, p);
++ return viwdupq_x_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u32 (a, b, 4, p);
++ return viwdupq_x_u32 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u32 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_n_u8 (a, b, 8, p);
++ return viwdupq_x_n_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u8 (a, b, 8, p);
++ return viwdupq_x_u8 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u8 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_wb_u16 (a, b, 8, p);
++ return viwdupq_x_wb_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u16 (a, b, 8, p);
++ return viwdupq_x_u16 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u16 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u16 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_wb_u32 (a, b, 2, p);
++ return viwdupq_x_wb_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u32 (a, b, 2, p);
++ return viwdupq_x_u32 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u32 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u32 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_wb_u8 (a, b, 4, p);
++ return viwdupq_x_wb_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
++foo1 (uint32_t *a, uint32_t b, mve_pred16_t p)
+ {
+- return viwdupq_x_u8 (a, b, 4, p);
++ return viwdupq_x_u8 (a, b, 1, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** viwdupt.u8 q[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (mve_pred16_t p)
++{
++ return viwdupq_x_u8 (1, 1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "viwdupt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_f16.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base)
++foo (float16_t const *base)
+ {
+ return vld1q_f16 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base)
++foo1 (float16_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_f32.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base)
++foo (float32_t const *base)
+ {
+ return vld1q_f32 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base)
++foo1 (float32_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s16.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base)
++foo (int16_t const *base)
+ {
+ return vld1q_s16 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base)
++foo1 (int16_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s32.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base)
++foo (int32_t const *base)
+ {
+ return vld1q_s32 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base)
++foo1 (int32_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_s8.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base)
++foo (int8_t const *base)
+ {
+ return vld1q_s8 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8_t const * base)
++foo1 (int8_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrb.8" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u16.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base)
++foo (uint16_t const *base)
+ {
+ return vld1q_u16 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base)
++foo1 (uint16_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u32.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base)
++foo (uint32_t const *base)
+ {
+ return vld1q_u32 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base)
++foo1 (uint32_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_u8.c
+@@ -1,20 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base)
++foo (uint8_t const *base)
+ {
+ return vld1q_u8 (base);
+ }
+
++
++/*
++**foo1:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8_t const * base)
++foo1 (uint8_t const *base)
+ {
+ return vld1q (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrb.8" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, mve_pred16_t p)
++foo (float16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_f16 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base, mve_pred16_t p)
++foo1 (float16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, mve_pred16_t p)
++foo (float32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_f32 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base, mve_pred16_t p)
++foo1 (float32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, mve_pred16_t p)
++foo (int16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_s16 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base, mve_pred16_t p)
++foo1 (int16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, mve_pred16_t p)
++foo (int32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_s32 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base, mve_pred16_t p)
++foo1 (int32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base, mve_pred16_t p)
++foo (int8_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_s8 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8_t const * base, mve_pred16_t p)
++foo1 (int8_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrbt.8" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, mve_pred16_t p)
++foo (uint16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_u16 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base, mve_pred16_t p)
++foo1 (uint16_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, mve_pred16_t p)
++foo (uint32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_u32 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base, mve_pred16_t p)
++foo1 (uint32_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld1q_z_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base, mve_pred16_t p)
++foo (uint8_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z_u8 (base, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8_t const * base, mve_pred16_t p)
++foo1 (uint8_t const *base, mve_pred16_t p)
+ {
+ return vld1q_z (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 2 } } */
+-/* { dg-final { scan-assembler-times "vldrbt.8" 2 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_f16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8x2_t
+-foo (float16_t const * addr)
++foo (float16_t const *addr)
+ {
+ return vld2q_f16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
+-/* { dg-final { scan-assembler "vld21.16" } } */
+
++/*
++**foo1:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8x2_t
+-foo1 (float16_t const * addr)
++foo1 (float16_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_f32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4x2_t
+-foo (float32_t const * addr)
++foo (float32_t const *addr)
+ {
+ return vld2q_f32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
+-/* { dg-final { scan-assembler "vld21.32" } } */
+
++/*
++**foo1:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4x2_t
+-foo1 (float32_t const * addr)
++foo1 (float32_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8x2_t
+-foo (int16_t const * addr)
++foo (int16_t const *addr)
+ {
+ return vld2q_s16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
+-/* { dg-final { scan-assembler "vld21.16" } } */
+
++/*
++**foo1:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8x2_t
+-foo1 (int16_t const * addr)
++foo1 (int16_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4x2_t
+-foo (int32_t const * addr)
++foo (int32_t const *addr)
+ {
+ return vld2q_s32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
+-/* { dg-final { scan-assembler "vld21.32" } } */
+
++/*
++**foo1:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4x2_t
+-foo1 (int32_t const * addr)
++foo1 (int32_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_s8.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16x2_t
+-foo (int8_t const * addr)
++foo (int8_t const *addr)
+ {
+ return vld2q_s8 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.8" } } */
+-/* { dg-final { scan-assembler "vld21.8" } } */
+
++/*
++**foo1:
++** ...
++** vld20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16x2_t
+-foo1 (int8_t const * addr)
++foo1 (int8_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8x2_t
+-foo (uint16_t const * addr)
++foo (uint16_t const *addr)
+ {
+ return vld2q_u16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
+-/* { dg-final { scan-assembler "vld21.16" } } */
+
++/*
++**foo1:
++** ...
++** vld20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8x2_t
+-foo1 (uint16_t const * addr)
++foo1 (uint16_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4x2_t
+-foo (uint32_t const * addr)
++foo (uint32_t const *addr)
+ {
+ return vld2q_u32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
+-/* { dg-final { scan-assembler "vld21.32" } } */
+
++/*
++**foo1:
++** ...
++** vld20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4x2_t
+-foo1 (uint32_t const * addr)
++foo1 (uint32_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld2q_u8.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16x2_t
+-foo (uint8_t const * addr)
++foo (uint8_t const *addr)
+ {
+ return vld2q_u8 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.8" } } */
+-/* { dg-final { scan-assembler "vld21.8" } } */
+
++/*
++**foo1:
++** ...
++** vld20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vld21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16x2_t
+-foo1 (uint8_t const * addr)
++foo1 (uint8_t const *addr)
+ {
+ return vld2q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld20.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_f16.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ float16x8x4_t
+-foo (float16_t const * addr)
++foo (float16_t const *addr)
+ {
+ return vld4q_f16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
+-/* { dg-final { scan-assembler "vld41.16" } } */
+-/* { dg-final { scan-assembler "vld42.16" } } */
+-/* { dg-final { scan-assembler "vld43.16" } } */
+
++/*
++**foo1:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ float16x8x4_t
+-foo1 (float16_t const * addr)
++foo1 (float16_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_f32.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ float32x4x4_t
+-foo (float32_t const * addr)
++foo (float32_t const *addr)
+ {
+ return vld4q_f32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
+-/* { dg-final { scan-assembler "vld41.32" } } */
+-/* { dg-final { scan-assembler "vld42.32" } } */
+-/* { dg-final { scan-assembler "vld43.32" } } */
+
++/*
++**foo1:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ float32x4x4_t
+-foo1 (float32_t const * addr)
++foo1 (float32_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s16.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int16x8x4_t
+-foo (int16_t const * addr)
++foo (int16_t const *addr)
+ {
+ return vld4q_s16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
+-/* { dg-final { scan-assembler "vld41.16" } } */
+-/* { dg-final { scan-assembler "vld42.16" } } */
+-/* { dg-final { scan-assembler "vld43.16" } } */
+
++/*
++**foo1:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int16x8x4_t
+-foo1 (int16_t const * addr)
++foo1 (int16_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s32.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int32x4x4_t
+-foo (int32_t const * addr)
++foo (int32_t const *addr)
+ {
+ return vld4q_s32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
+-/* { dg-final { scan-assembler "vld41.32" } } */
+-/* { dg-final { scan-assembler "vld42.32" } } */
+-/* { dg-final { scan-assembler "vld43.32" } } */
+
++/*
++**foo1:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int32x4x4_t
+-foo1 (int32_t const * addr)
++foo1 (int32_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_s8.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int8x16x4_t
+-foo (int8_t const * addr)
++foo (int8_t const *addr)
+ {
+ return vld4q_s8 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.8" } } */
+-/* { dg-final { scan-assembler "vld41.8" } } */
+-/* { dg-final { scan-assembler "vld42.8" } } */
+-/* { dg-final { scan-assembler "vld43.8" } } */
+
++/*
++**foo1:
++** ...
++** vld40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ int8x16x4_t
+-foo1 (int8_t const * addr)
++foo1 (int8_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u16.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint16x8x4_t
+-foo (uint16_t const * addr)
++foo (uint16_t const *addr)
+ {
+ return vld4q_u16 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
+-/* { dg-final { scan-assembler "vld41.16" } } */
+-/* { dg-final { scan-assembler "vld42.16" } } */
+-/* { dg-final { scan-assembler "vld43.16" } } */
+
++/*
++**foo1:
++** ...
++** vld40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint16x8x4_t
+-foo1 (uint16_t const * addr)
++foo1 (uint16_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u32.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint32x4x4_t
+-foo (uint32_t const * addr)
++foo (uint32_t const *addr)
+ {
+ return vld4q_u32 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
+-/* { dg-final { scan-assembler "vld41.32" } } */
+-/* { dg-final { scan-assembler "vld42.32" } } */
+-/* { dg-final { scan-assembler "vld43.32" } } */
+
++/*
++**foo1:
++** ...
++** vld40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint32x4x4_t
+-foo1 (uint32_t const * addr)
++foo1 (uint32_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vld4q_u8.c
+@@ -1,24 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vld40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint8x16x4_t
+-foo (uint8_t const * addr)
++foo (uint8_t const *addr)
+ {
+ return vld4q_u8 (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.8" } } */
+-/* { dg-final { scan-assembler "vld41.8" } } */
+-/* { dg-final { scan-assembler "vld42.8" } } */
+-/* { dg-final { scan-assembler "vld43.8" } } */
+
++/*
++**foo1:
++** ...
++** vld40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vld43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ uint8x16x4_t
+-foo1 (uint8_t const * addr)
++foo1 (uint8_t const *addr)
+ {
+ return vld4q (addr);
+ }
+
+-/* { dg-final { scan-assembler "vld40.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int8_t const * base, uint16x8_t offset)
++foo (int8_t const *base, uint16x8_t offset)
+ {
+ return vldrbq_gather_offset_s16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int8_t const * base, uint16x8_t offset)
++foo1 (int8_t const *base, uint16x8_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int8_t const * base, uint32x4_t offset)
++foo (int8_t const *base, uint32x4_t offset)
+ {
+ return vldrbq_gather_offset_s32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int8_t const * base, uint32x4_t offset)
++foo1 (int8_t const *base, uint32x4_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base, uint8x16_t offset)
++foo (int8_t const *base, uint8x16_t offset)
+ {
+ return vldrbq_gather_offset_s8 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u8" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8_t const * base, uint8x16_t offset)
++foo1 (int8_t const *base, uint8x16_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint8_t const * base, uint16x8_t offset)
++foo (uint8_t const *base, uint16x8_t offset)
+ {
+ return vldrbq_gather_offset_u16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint8_t const * base, uint16x8_t offset)
++foo1 (uint8_t const *base, uint16x8_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint8_t const * base, uint32x4_t offset)
++foo (uint8_t const *base, uint32x4_t offset)
+ {
+ return vldrbq_gather_offset_u32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint8_t const * base, uint32x4_t offset)
++foo1 (uint8_t const *base, uint32x4_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base, uint8x16_t offset)
++foo (uint8_t const *base, uint8x16_t offset)
+ {
+ return vldrbq_gather_offset_u8 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u8" } } */
+
++/*
++**foo1:
++** ...
++** vldrb.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8_t const * base, uint8x16_t offset)
++foo1 (uint8_t const *base, uint8x16_t offset)
+ {
+ return vldrbq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int8_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (int8_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_s16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int8_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (int8_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int8_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (int8_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_s32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int8_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (int8_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base, uint8x16_t offset, mve_pred16_t p)
++foo (int8_t const *base, uint8x16_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_s8 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8_t const * base, uint8x16_t offset, mve_pred16_t p)
++foo1 (int8_t const *base, uint8x16_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint8_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (uint8_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_u16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint8_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (uint8_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint8_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (uint8_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_u32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint8_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (uint8_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base, uint8x16_t offset, mve_pred16_t p)
++foo (uint8_t const *base, uint8x16_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z_u8 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8_t const * base, uint8x16_t offset, mve_pred16_t p)
++foo1 (uint8_t const *base, uint8x16_t offset, mve_pred16_t p)
+ {
+ return vldrbq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int8_t const * base)
++foo (int8_t const *base)
+ {
+ return vldrbq_s16 (base);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int8_t const * base)
++foo (int8_t const *base)
+ {
+ return vldrbq_s32 (base);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_s8.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base)
++foo (int8_t const *base)
+ {
+ return vldrbq_s8 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrb.8" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint8_t const * base)
++foo (uint8_t const *base)
+ {
+ return vldrbq_u16 (base);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint8_t const * base)
++foo (uint8_t const *base)
+ {
+ return vldrbq_u32 (base);
+ }
+
+-/* { dg-final { scan-assembler "vldrb.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_u8.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base)
++foo (uint8_t const *base)
+ {
+ return vldrbq_u8 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrb.8" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s16.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int8_t const * base, mve_pred16_t p)
++foo (int8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_s16 (base, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s32.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int8_t const * base, mve_pred16_t p)
++foo (int8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_s32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_s8.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8_t const * base, mve_pred16_t p)
++foo (int8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_s8 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrbt.8" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u16.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint8_t const * base, mve_pred16_t p)
++foo (uint8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_u16 (base, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u32.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint8_t const * base, mve_pred16_t p)
++foo (uint8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_u32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrbt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrbq_z_u8.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8_t const * base, mve_pred16_t p)
++foo (uint8_t const *base, mve_pred16_t p)
+ {
+ return vldrbq_z_u8 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrbt.8" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_s64.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (uint64x2_t addr)
+ {
+- return vldrdq_gather_base_s64 (addr, 8);
++ return vldrdq_gather_base_s64 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_u64.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64x2_t addr)
+ {
+- return vldrdq_gather_base_u64 (addr, 8);
++ return vldrdq_gather_base_u64 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_s64.c
+@@ -1,16 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo (uint64x2_t * addr)
++foo (uint64x2_t *addr)
+ {
+- return vldrdq_gather_base_wb_s64 (addr, 8);
++ return vldrdq_gather_base_wb_s64 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrd.64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler-times "vldrw.u32" 1 } } */
+-/* { dg-final { scan-assembler-times "vstrw.32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_u64.c
+@@ -1,16 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo (uint64x2_t * addr)
++foo (uint64x2_t *addr)
+ {
+- return vldrdq_gather_base_wb_u64 (addr, 8);
++ return vldrdq_gather_base_wb_u64 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrd.64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler-times "vldrw.u32" 1 } } */
+-/* { dg-final { scan-assembler-times "vstrw.32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_s64.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
+ #include "arm_mve.h"
+
+-int64x2_t foo (uint64x2_t * addr, mve_pred16_t p)
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
++int64x2_t
++foo (uint64x2_t *addr, mve_pred16_t p)
+ {
+- return vldrdq_gather_base_wb_z_s64 (addr, 1016, p);
++ return vldrdq_gather_base_wb_z_s64 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vldrdt.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler-times "vldrw.u32" 1 } } */
+-/* { dg-final { scan-assembler-times "vstrw.32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_wb_z_u64.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
+ #include "arm_mve.h"
+
+-uint64x2_t foo (uint64x2_t * addr, mve_pred16_t p)
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
++uint64x2_t
++foo (uint64x2_t *addr, mve_pred16_t p)
+ {
+- return vldrdq_gather_base_wb_z_u64 (addr, 8, p);
++ return vldrdq_gather_base_wb_z_u64 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vldrdt.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler-times "vldrw.u32" 1 } } */
+-/* { dg-final { scan-assembler-times "vstrw.32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_s64.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (uint64x2_t addr, mve_pred16_t p)
+ {
+- return vldrdq_gather_base_z_s64 (addr, 8, p);
++ return vldrdq_gather_base_z_s64 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_base_z_u64.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64x2_t addr, mve_pred16_t p)
+ {
+- return vldrdq_gather_base_z_u64 (addr, 8, p);
++ return vldrdq_gather_base_z_u64 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_s64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo (int64_t const * base, uint64x2_t offset)
++foo (int64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_offset_s64 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo1 (int64_t const * base, uint64x2_t offset)
++foo1 (int64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_u64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo (uint64_t const * base, uint64x2_t offset)
++foo (uint64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_offset_u64 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo1 (uint64_t const * base, uint64x2_t offset)
++foo1 (uint64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_s64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo (int64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo (int64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_offset_z_s64 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo1 (int64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo1 (int64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_offset_z_u64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo (uint64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo (uint64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_offset_z_u64 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo1 (uint64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo1 (uint64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_s64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo (int64_t const * base, uint64x2_t offset)
++foo (int64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_shifted_offset_s64 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo1 (int64_t const * base, uint64x2_t offset)
++foo1 (int64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_u64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo (uint64_t const * base, uint64x2_t offset)
++foo (uint64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_shifted_offset_u64 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vldrd.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo1 (uint64_t const * base, uint64x2_t offset)
++foo1 (uint64_t const *base, uint64x2_t offset)
+ {
+ return vldrdq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrd.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_s64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo (int64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo (int64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_shifted_offset_z_s64 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ int64x2_t
+-foo1 (int64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo1 (int64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrdq_gather_shifted_offset_z_u64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo (uint64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo (uint64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_shifted_offset_z_u64 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrdt.u64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ uint64x2_t
+-foo1 (uint64_t const * base, uint64x2_t offset, mve_pred16_t p)
++foo1 (uint64_t const *base, uint64x2_t offset, mve_pred16_t p)
+ {
+ return vldrdq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrdt.u64" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_f16.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base)
++foo (float16_t const *base)
+ {
+ return vldrhq_f16 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, uint16x8_t offset)
++foo (float16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset_f16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.f16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base, uint16x8_t offset)
++foo1 (float16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, uint16x8_t offset)
++foo (int16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset_s16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base, uint16x8_t offset)
++foo1 (int16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base, uint32x4_t offset)
++foo (int16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_offset_s32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int16_t const * base, uint32x4_t offset)
++foo1 (int16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, uint16x8_t offset)
++foo (uint16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset_u16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base, uint16x8_t offset)
++foo1 (uint16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base, uint32x4_t offset)
++foo (uint16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_offset_u32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint16_t const * base, uint32x4_t offset)
++foo1 (uint16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (float16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z_f16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (float16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (int16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z_s16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (int16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (int16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z_s32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (int16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (uint16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z_u16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (uint16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_offset_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (uint16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z_u32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (uint16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, uint16x8_t offset)
++foo (float16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset_f16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.f16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base, uint16x8_t offset)
++foo1 (float16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, uint16x8_t offset)
++foo (int16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset_s16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base, uint16x8_t offset)
++foo1 (int16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base, uint32x4_t offset)
++foo (int16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_shifted_offset_s32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int16_t const * base, uint32x4_t offset)
++foo1 (int16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, uint16x8_t offset)
++foo (uint16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset_u16 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base, uint16x8_t offset)
++foo1 (uint16_t const *base, uint16x8_t offset)
+ {
+ return vldrhq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base, uint32x4_t offset)
++foo (uint16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_shifted_offset_u32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrh.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint16_t const * base, uint32x4_t offset)
++foo1 (uint16_t const *base, uint32x4_t offset)
+ {
+ return vldrhq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrh.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (float16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z_f16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.f16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo1 (float16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (float16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (int16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z_s16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (int16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (int16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z_s32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (int16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo (uint16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z_u16 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16_t const * base, uint16x8_t offset, mve_pred16_t p)
++foo1 (uint16_t const *base, uint16x8_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (uint16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z_u32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint16_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (uint16_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrhq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrht.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_s16.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base)
++foo (int16_t const *base)
+ {
+ return vldrhq_s16 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_s32.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base)
++foo (int16_t const *base)
+ {
+ return vldrhq_s32 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.s32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_u16.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base)
++foo (uint16_t const *base)
+ {
+ return vldrhq_u16 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.16" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_u32.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrh.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base)
++foo (uint16_t const *base)
+ {
+ return vldrhq_u32 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrh.u32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_f16.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float16x8_t
+-foo (float16_t const * base, mve_pred16_t p)
++foo (float16_t const *base, mve_pred16_t p)
+ {
+ return vldrhq_z_f16 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_s16.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16_t const * base, mve_pred16_t p)
++foo (int16_t const *base, mve_pred16_t p)
+ {
+ return vldrhq_z_s16 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_s32.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.s32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int16_t const * base, mve_pred16_t p)
++foo (int16_t const *base, mve_pred16_t p)
+ {
+ return vldrhq_z_s32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrht.s32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_u16.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16_t const * base, mve_pred16_t p)
++foo (uint16_t const *base, mve_pred16_t p)
+ {
+ return vldrhq_z_u16 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrht.16" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_z_u32.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrht.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint16_t const * base, mve_pred16_t p)
++foo (uint16_t const *base, mve_pred16_t p)
+ {
+ return vldrhq_z_u32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrht.u32" 1 } } */
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_f32.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base)
++foo (float32_t const *base)
+ {
+ return vldrwq_f32 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_f32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t addr)
+ {
+- return vldrwq_gather_base_f32 (addr, 4);
++ return vldrwq_gather_base_f32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_s32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (uint32x4_t addr)
+ {
+- return vldrwq_gather_base_s32 (addr, 4);
++ return vldrwq_gather_base_s32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_u32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t addr)
+ {
+- return vldrwq_gather_base_u32 (addr, 4);
++ return vldrwq_gather_base_u32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_f32.c
+@@ -1,16 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (uint32x4_t * addr)
++foo (uint32x4_t *addr)
+ {
+- return vldrwq_gather_base_wb_f32 (addr, 8);
++ return vldrwq_gather_base_wb_f32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vldrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_s32.c
+@@ -1,16 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (uint32x4_t * addr)
++foo (uint32x4_t *addr)
+ {
+- return vldrwq_gather_base_wb_s32 (addr, 8);
++ return vldrwq_gather_base_wb_s32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vldrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_u32.c
+@@ -1,16 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t * addr)
++foo (uint32x4_t *addr)
+ {
+- return vldrwq_gather_base_wb_u32 (addr, 8);
++ return vldrwq_gather_base_wb_u32 (addr, 0);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vldrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_f32.c
+@@ -1,18 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (uint32x4_t * addr, mve_pred16_t p)
++foo (uint32x4_t *addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_wb_z_f32 (addr, 8, p);
++ return vldrwq_gather_base_wb_z_f32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vmsr\t P0, r\[0-9\]+.*" } } */
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vldrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_s32.c
+@@ -1,18 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (uint32x4_t * addr, mve_pred16_t p)
++foo (uint32x4_t *addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_wb_z_s32 (addr, 8, p);
++ return vldrwq_gather_base_wb_z_s32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vmsr\t P0, r\[0-9\]+.*" } } */
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vldrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_wb_z_u32.c
+@@ -1,18 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t * addr, mve_pred16_t p)
++foo (uint32x4_t *addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_wb_z_u32 (addr, 8, p);
++ return vldrwq_gather_base_wb_z_u32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+-/* { dg-final { scan-assembler "vmsr\t P0, r\[0-9\]+.*" } } */
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vldrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" } } */
+-/* { dg-final { scan-assembler "vstrw.32\tq\[0-9\]+, \\\[r\[0-9\]+\\\]" } } */
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_f32.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (uint32x4_t addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_z_f32 (addr, 4, p);
++ return vldrwq_gather_base_z_f32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_s32.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (uint32x4_t addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_z_s32 (addr, 4, p);
++ return vldrwq_gather_base_z_s32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_u32.c
+@@ -1,13 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t addr, mve_pred16_t p)
+ {
+- return vldrwq_gather_base_z_u32 (addr, 4, p);
++ return vldrwq_gather_base_z_u32 (addr, 0, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, uint32x4_t offset)
++foo (float32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset_f32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base, uint32x4_t offset)
++foo1 (float32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, uint32x4_t offset)
++foo (int32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset_s32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base, uint32x4_t offset)
++foo1 (int32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, uint32x4_t offset)
++foo (uint32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset_u32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base, uint32x4_t offset)
++foo1 (uint32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (float32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z_f32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (float32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (int32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z_s32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (int32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_offset_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (uint32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z_u32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (uint32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, uint32x4_t offset)
++foo (float32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset_f32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base, uint32x4_t offset)
++foo1 (float32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, uint32x4_t offset)
++foo (int32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset_s32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base, uint32x4_t offset)
++foo1 (int32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, uint32x4_t offset)
++foo (uint32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset_u32 (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vldrw.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base, uint32x4_t offset)
++foo1 (uint32_t const *base, uint32x4_t offset)
+ {
+ return vldrwq_gather_shifted_offset (base, offset);
+ }
+
+-/* { dg-final { scan-assembler "vldrw.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (float32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z_f32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo1 (float32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (float32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (int32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z_s32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (int32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_gather_shifted_offset_z_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo (uint32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z_u32 (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.u32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32_t const * base, uint32x4_t offset, mve_pred16_t p)
++foo1 (uint32_t const *base, uint32x4_t offset, mve_pred16_t p)
+ {
+ return vldrwq_gather_shifted_offset_z (base, offset, p);
+ }
+
+-/* { dg-final { scan-assembler "vldrwt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_s32.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base)
++foo (int32_t const *base)
+ {
+ return vldrwq_s32 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_u32.c
+@@ -1,14 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vldrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base)
++foo (uint32_t const *base)
+ {
+ return vldrwq_u32 (base);
+ }
+
+-/* { dg-final { scan-assembler-times "vldrw.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_f32.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ float32x4_t
+-foo (float32_t const * base, mve_pred16_t p)
++foo (float32_t const *base, mve_pred16_t p)
+ {
+ return vldrwq_z_f32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_s32.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32_t const * base, mve_pred16_t p)
++foo (int32_t const *base, mve_pred16_t p)
+ {
+ return vldrwq_z_s32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrwq_z_u32.c
+@@ -1,15 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vldrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32_t const * base, mve_pred16_t p)
++foo (uint32_t const *base, mve_pred16_t p)
+ {
+ return vldrwq_z_u32 (base, p);
+ }
+
+-/* { dg-final { scan-assembler-times "vpst" 1 } } */
+-/* { dg-final { scan-assembler-times "vldrwt.32" 1 } } */
++#ifdef __cplusplus
++}
++#endif
++
+ /* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxat.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxa.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vmaxaq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmaxa.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vmaxaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxa.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vmaxaq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmaxa.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vmaxaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxa.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vmaxaq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmaxa.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vmaxaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxa.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, int16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint16_t a, int16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxavq_p (a, b, p);
+ }
+
+-
+-int16_t
+-foo2 (uint8_t a, int16x8_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint16_t
++foo2 (int16x8_t b, mve_pred16_t p)
+ {
+- return vmaxavq_p (a, b, p);
++ return vmaxavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxavt.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint32_t a, int32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxavq_p (a, b, p);
+ }
+
+-
+-int32_t
+-foo2 (uint16_t a, int32x4_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b, mve_pred16_t p)
+ {
+- return vmaxavq_p (a, b, p);
++ return vmaxavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxavt.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, int8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint8_t a, int8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxavq_p (a, b, p);
+ }
+
+-
+-int8_t
+-foo2 (uint32_t a, int8x16_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint8_t
++foo2 (int8x16_t b, mve_pred16_t p)
+ {
+- return vmaxavq_p (a, b, p);
++ return vmaxavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxavt.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, int16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint16_t a, int16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, int16x8_t b)
+ {
+ return vmaxavq (a, b);
+ }
+
+-
+-int16_t
+-foo2 (uint8_t a, int16x8_t b)
++/*
++**foo2:
++** ...
++** vmaxav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint16_t
++foo2 (int16x8_t b)
+ {
+- return vmaxavq (a, b);
++ return vmaxavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxav.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint32_t a, int32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b)
+ {
+ return vmaxavq (a, b);
+ }
+
+-
+-int32_t
+-foo2 (uint16_t a, int32x4_t b)
++/*
++**foo2:
++** ...
++** vmaxav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b)
+ {
+- return vmaxavq (a, b);
++ return vmaxavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxav.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, int8x16_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint8_t a, int8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, int8x16_t b)
+ {
+ return vmaxavq (a, b);
+ }
+
+-
+-int8_t
+-foo2 (uint32_t a, int8x16_t b)
++/*
++**foo2:
++** ...
++** vmaxav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint8_t
++foo2 (int8x16_t b)
+ {
+- return vmaxavq (a, b);
++ return vmaxavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxav.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnma.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vmaxnmaq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnma.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmaxnma.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vmaxnmaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnma.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnma.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vmaxnmaq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnma.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmaxnma.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vmaxnmaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnma.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmaq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmaq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmaq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b)
+-{
+- return vmaxnmavq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float16_t a, float16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b)
+ {
+ return vmaxnmavq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b)
++foo2 (float16x8_t b)
+ {
+- return vmaxnmavq (a, b);
++ return vmaxnmavq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmav.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b)
+-{
+- return vmaxnmavq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_f32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float32_t a, float32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b)
+ {
+ return vmaxnmavq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b)
++foo2 (float32x4_t b)
+ {
+- return vmaxnmavq (a, b);
++ return vmaxnmavq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmav.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+-{
+- return vmaxnmavq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmavq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b, mve_pred16_t p)
++foo2 (float16x8_t b, mve_pred16_t p)
+ {
+- return vmaxnmavq_p (a, b, p);
++ return vmaxnmavq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmavt.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vmaxnmavq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmavq_p_f32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmavq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b, mve_pred16_t p)
++foo2 (float32x4_t b, mve_pred16_t p)
+ {
+- return vmaxnmavq_p (a, b, p);
++ return vmaxnmavq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmavt.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnm.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vmaxnmq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnm.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmaxnm.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vmaxnmq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnm.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnm.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vmaxnmq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnm.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmaxnm.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vmaxnmq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmaxnm.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxnmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b)
+-{
+- return vmaxnmvq (23.35, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float16_t a, float16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b)
+ {
+ return vmaxnmvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b)
++foo2 (float16x8_t b)
+ {
+- return vmaxnmvq (a, b);
++ return vmaxnmvq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmv.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b)
+-{
+- return vmaxnmvq (34.56, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_f32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float32_t a, float32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b)
+ {
+ return vmaxnmvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b)
++foo2 (float32x4_t b)
+ {
+- return vmaxnmvq (a, b);
++ return vmaxnmvq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmv.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+-{
+- return vmaxnmvq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmaxnmvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b, mve_pred16_t p)
++foo2 (float16x8_t b, mve_pred16_t p)
+ {
+- return vmaxnmvq_p (a, b, p);
++ return vmaxnmvq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmvt.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vmaxnmvq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmvq_p_f32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmaxnmvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b, mve_pred16_t p)
++foo2 (float32x4_t b, mve_pred16_t p)
+ {
+- return vmaxnmvq_p (a, b, p);
++ return vmaxnmvq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxnmvt.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmaxq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmax.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmaxq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmax.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmaxq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmax.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmaxq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmax.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vmaxq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmax.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmax.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmaxq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmax.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmaxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmax.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmaxt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmaxq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo (int16_t a, int16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int16_t a, int16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo1 (int16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
+-int16_t
+-foo2 (int8_t a, int16x8_t b, mve_pred16_t p)
+-{
+- return vmaxvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int32_t a, int32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
+-int32_t
+-foo2 (int16_t a, int32x4_t b, mve_pred16_t p)
+-{
+- return vmaxvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo (int8_t a, int8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int8_t a, int8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo1 (int8_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
+-int8_t
+-foo2 (int32_t a, int8x16_t b, mve_pred16_t p)
+-{
+- return vmaxvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+-foo2 (uint32_t a, uint16x8_t b, mve_pred16_t p)
++foo2 (uint16x8_t b, mve_pred16_t p)
+ {
+- return vmaxvq_p (a, b, p);
++ return vmaxvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.u16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo2 (uint8_t a, uint32x4_t b, mve_pred16_t p)
++foo2 (uint32x4_t b, mve_pred16_t p)
+ {
+- return vmaxvq_p (a, b, p);
++ return vmaxvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.u32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmaxvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmaxvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+-foo2 (uint16_t a, uint8x16_t b, mve_pred16_t p)
++foo2 (uint8x16_t b, mve_pred16_t p)
+ {
+- return vmaxvq_p (a, b, p);
++ return vmaxvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxvt.u8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo (int16_t a, int16x8_t b)
+ {
+@@ -11,18 +22,20 @@ foo (int16_t a, int16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo1 (int16_t a, int16x8_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
+-int16_t
+-foo2 (int8_t a, int16x8_t b)
+-{
+- return vmaxvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b)
+ {
+@@ -11,18 +22,20 @@ foo (int32_t a, int32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
+-int32_t
+-foo2 (int16_t a, int32x4_t b)
+-{
+- return vmaxvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo (int8_t a, int8x16_t b)
+ {
+@@ -11,18 +22,20 @@ foo (int8_t a, int8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo1 (int8_t a, int8x16_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
+-int8_t
+-foo2 (int32_t a, int8x16_t b)
+-{
+- return vmaxvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, uint16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint16_t a, uint16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, uint16x8_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+-foo2 (uint32_t a, uint16x8_t b)
++foo2 (uint16x8_t b)
+ {
+- return vmaxvq (a, b);
++ return vmaxvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.u16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint32_t a, uint32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo2 (uint8_t a, uint32x4_t b)
++foo2 (uint32x4_t b)
+ {
+- return vmaxvq (a, b);
++ return vmaxvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.u32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmaxv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, uint8x16_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint8_t a, uint8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmaxv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, uint8x16_t b)
+ {
+ return vmaxvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmaxv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+-foo2 (uint16_t a, uint8x16_t b)
++foo2 (uint8x16_t b)
+ {
+- return vmaxvq (a, b);
++ return vmaxvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vmaxv.u8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminaq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminaq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminaq_m_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminat.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmina.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vminaq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmina.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vminaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmina.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vminaq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmina.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vminaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmina.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vminaq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmina.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vminaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmina.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, int16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint16_t a, int16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminavq_p (a, b, p);
+ }
+
+-
+-int16_t
+-foo2 (uint8_t a, int16x8_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint16_t
++foo2 (int16x8_t b, mve_pred16_t p)
+ {
+- return vminavq_p (a, b, p);
++ return vminavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminavt.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint32_t a, int32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminavq_p (a, b, p);
+ }
+
+-
+-int32_t
+-foo2 (uint16_t a, int32x4_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b, mve_pred16_t p)
+ {
+- return vminavq_p (a, b, p);
++ return vminavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminavt.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, int8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint8_t a, int8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminavq_p (a, b, p);
+ }
+
+-
+-int8_t
+-foo2 (uint32_t a, int8x16_t b, mve_pred16_t p)
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint8_t
++foo2 (int8x16_t b, mve_pred16_t p)
+ {
+- return vminavq_p (a, b, p);
++ return vminavq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminavt.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, int16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint16_t a, int16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, int16x8_t b)
+ {
+ return vminavq (a, b);
+ }
+
+-
+-int16_t
+-foo2 (uint8_t a, int16x8_t b)
++/*
++**foo2:
++** ...
++** vminav.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint16_t
++foo2 (int16x8_t b)
+ {
+- return vminavq (a, b);
++ return vminavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminav.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, int32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint32_t a, int32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, int32x4_t b)
+ {
+ return vminavq (a, b);
+ }
+
+-
+-int32_t
+-foo2 (uint16_t a, int32x4_t b)
++/*
++**foo2:
++** ...
++** vminav.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (int32x4_t b)
+ {
+- return vminavq (a, b);
++ return vminavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminav.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, int8x16_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint8_t a, int8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, int8x16_t b)
+ {
+ return vminavq (a, b);
+ }
+
+-
+-int8_t
+-foo2 (uint32_t a, int8x16_t b)
++/*
++**foo2:
++** ...
++** vminav.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint8_t
++foo2 (int8x16_t b)
+ {
+- return vminavq (a, b);
++ return vminavq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminav.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnma.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vminnmaq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnma.f16" } } */
+
++/*
++**foo1:
++** ...
++** vminnma.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vminnmaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnma.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnma.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vminnmaq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnma.f32" } } */
+
++/*
++**foo1:
++** ...
++** vminnma.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vminnmaq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnma.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmaq_m_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmaq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmaq_m_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmaq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b)
+-{
+- return vminnmavq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float16_t a, float16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b)
+ {
+ return vminnmavq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vminnmav.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b)
++foo2 (float16x8_t b)
+ {
+- return vminnmavq (a, b);
++ return vminnmavq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmav.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b)
+-{
+- return vminnmavq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_f32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float32_t a, float32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b)
+ {
+ return vminnmavq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vminnmav.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b)
++foo2 (float32x4_t b)
+ {
+- return vminnmavq (a, b);
++ return vminnmavq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmav.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+-{
+- return vminnmavq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmavq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b, mve_pred16_t p)
++foo2 (float16x8_t b, mve_pred16_t p)
+ {
+- return vminnmavq_p (a, b, p);
++ return vminnmavq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmavt.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vminnmavq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmavq_p_f32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmavq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmavt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b, mve_pred16_t p)
++foo2 (float32x4_t b, mve_pred16_t p)
+ {
+- return vminnmavq_p (a, b, p);
++ return vminnmavq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmavt.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnm.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vminnmq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnm.f16" } } */
+
++/*
++**foo1:
++** ...
++** vminnm.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vminnmq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnm.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnm.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vminnmq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnm.f32" } } */
+
++/*
++**foo1:
++** ...
++** vminnm.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vminnmq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vminnm.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vminnmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b)
+-{
+- return vminnmvq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float16_t a, float16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b)
+ {
+ return vminnmvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vminnmv.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b)
++foo2 (float16x8_t b)
+ {
+- return vminnmvq (a, b);
++ return vminnmvq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmv.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b)
+-{
+- return vminnmvq (a, b);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_f32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b)
+ {
+@@ -11,18 +22,32 @@ foo (float32_t a, float32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b)
+ {
+ return vminnmvq (a, b);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vminnmv.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b)
++foo2 (float32x4_t b)
+ {
+- return vminnmvq (a, b);
++ return vminnmvq (1.1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmv.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16_t
+-foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+-{
+- return vminnmvq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float16_t a, float16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+ foo1 (float16_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vminnmvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float16_t
+-foo2 (float32_t a, float16x8_t b, mve_pred16_t p)
++foo2 (float16x8_t b, mve_pred16_t p)
+ {
+- return vminnmvq_p (a, b, p);
++ return vminnmvq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmvt.f16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32_t
+-foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+-{
+- return vminnmvq_p (a, b, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmvq_p_f32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (float32_t a, float32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+ foo1 (float32_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vminnmvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminnmvt.f32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ float32_t
+-foo2 (float16_t a, float32x4_t b, mve_pred16_t p)
++foo2 (float32x4_t b, mve_pred16_t p)
+ {
+- return vminnmvq_p (a, b, p);
++ return vminnmvq_p (1.1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminnmvt.f32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vminq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vminq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vminq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vminq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vminq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmin.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vminq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmin.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vminq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmin.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vminq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmin.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vminq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmin.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmin.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vminq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmin.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vminq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmin.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vminq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vminq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vminq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmint.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmint.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vminq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo (int16_t a, int16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int16_t a, int16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo1 (int16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
+-int16_t
+-foo2 (int8_t a, int16x8_t b, mve_pred16_t p)
+-{
+- return vminvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int32_t a, int32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
+-int32_t
+-foo2 (int16_t a, int32x4_t b, mve_pred16_t p)
+-{
+- return vminvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo (int8_t a, int8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,24 @@ foo (int8_t a, int8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo1 (int8_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
+-int8_t
+-foo2 (int32_t a, int8x16_t b, mve_pred16_t p)
+-{
+- return vminvq_p (a, b, p);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+-foo2 (uint32_t a, uint16x8_t b, mve_pred16_t p)
++foo2 (uint16x8_t b, mve_pred16_t p)
+ {
+- return vminvq_p (a, b, p);
++ return vminvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.u16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo2 (uint8_t a, uint32x4_t b, mve_pred16_t p)
++foo2 (uint32x4_t b, mve_pred16_t p)
+ {
+- return vminvq_p (a, b, p);
++ return vminvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.u32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c
+@@ -1,9 +1,24 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ {
+@@ -11,18 +26,40 @@ foo (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ }
+
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vminvq_p (a, b, p);
+ }
+
+-
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vminvt.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+-foo2 (uint16_t a, uint8x16_t b, mve_pred16_t p)
++foo2 (uint8x16_t b, mve_pred16_t p)
+ {
+- return vminvq_p (a, b, p);
++ return vminvq_p (1, b, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminvt.u8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo (int16_t a, int16x8_t b)
+ {
+@@ -11,17 +22,20 @@ foo (int16_t a, int16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.s16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int16_t
+ foo1 (int16_t a, int16x8_t b)
+ {
+ return vminvq (a, b);
+ }
+
+-int16_t
+-foo2 (int8_t a, int16x8_t b)
+-{
+- return vminvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.s16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b)
+ {
+@@ -11,17 +22,20 @@ foo (int32_t a, int32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.s32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b)
+ {
+ return vminvq (a, b);
+ }
+
+-int32_t
+-foo2 (int8_t a, int32x4_t b)
+-{
+- return vminvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.s32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo (int8_t a, int8x16_t b)
+ {
+@@ -11,17 +22,20 @@ foo (int8_t a, int8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.s8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ int8_t
+ foo1 (int8_t a, int8x16_t b)
+ {
+ return vminvq (a, b);
+ }
+
+-int8_t
+-foo2 (int32_t a, int8x16_t b)
+-{
+- return vminvq (a, b);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.s8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo (uint16_t a, uint16x8_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint16_t a, uint16x8_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16_t
+ foo1 (uint16_t a, uint16x8_t b)
+ {
+ return vminvq (a, b);
+ }
+
+-
+-uint8_t
+-foo2 (uint32_t a, uint16x8_t b)
++/*
++**foo2:
++** ...
++** vminv.u16 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint16_t
++foo2 (uint16x8_t b)
+ {
+- return vminvq (a, b);
++ return vminvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.u16" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo (uint32_t a, uint32x4_t b)
+ {
+@@ -11,17 +22,32 @@ foo (uint32_t a, uint32x4_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+ foo1 (uint32_t a, uint32x4_t b)
+ {
+ return vminvq (a, b);
+ }
+
++/*
++**foo2:
++** ...
++** vminv.u32 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo2 (uint16_t a, uint32x4_t b)
++foo2 (uint32x4_t b)
+ {
+- return vminvq (a, b);
++ return vminvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.u32" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c
+@@ -1,9 +1,20 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vminv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo (uint8_t a, uint8x16_t b)
+ {
+@@ -11,18 +22,32 @@ foo (uint8_t a, uint8x16_t b)
+ }
+
+
++/*
++**foo1:
++** ...
++** vminv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8_t
+ foo1 (uint8_t a, uint8x16_t b)
+ {
+ return vminvq (a, b);
+ }
+
+-
+-uint16_t
+-foo2 (uint32_t a, uint8x16_t b)
++/*
++**foo2:
++** ...
++** vminv.u8 (?:ip|fp|r[0-9]+), q[0-9]+(?: @.*|)
++** ...
++*/
++uint8_t
++foo2 (uint8x16_t b)
+ {
+- return vminvq (a, b);
++ return vminvq (1, b);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+-/* { dg-final { scan-assembler-times "vminv.u8" 3 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo (int32_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_s16 (a, b, c, p);
++ return vmladavaq_p_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo1 (int32_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.s16" } } */
+-/* { dg-final { scan-assembler "vmladavat.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo (int32_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_s32 (a, b, c, p);
++ return vmladavaq_p_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo1 (int32_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.s32" } } */
+-/* { dg-final { scan-assembler "vmladavat.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
++foo (int32_t add, int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_s8 (a, b, c, p);
++ return vmladavaq_p_s8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
++foo1 (int32_t add, int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.s8" } } */
+-/* { dg-final { scan-assembler "vmladavat.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u16.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
++foo (uint32_t add, uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_u16 (a, b, c, p);
++ return vmladavaq_p_u16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
++foo1 (uint32_t add, uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
++{
++ return vmladavaq_p (1, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.u16" } } */
+-/* { dg-final { scan-assembler "vmladavat.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u32.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++foo (uint32_t add, uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_u32 (a, b, c, p);
++ return vmladavaq_p_u32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++foo1 (uint32_t add, uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
++{
++ return vmladavaq_p (1, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.u32" } } */
+-/* { dg-final { scan-assembler "vmladavat.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_p_u8.c
+@@ -1,22 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
++foo (uint32_t add, uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p_u8 (a, b, c, p);
++ return vmladavaq_p_u8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavat.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
++foo1 (uint32_t add, uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaq_p (a, b, c, p);
++ return vmladavaq_p (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavat.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
++{
++ return vmladavaq_p (1, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavat.u8" } } */
+-/* { dg-final { scan-assembler "vmladavat.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int16x8_t b, int16x8_t c)
++foo (int32_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavaq_s16 (a, b, c);
++ return vmladavaq_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int16x8_t b, int16x8_t c)
++foo1 (int32_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int32x4_t b, int32x4_t c)
++foo (int32_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavaq_s32 (a, b, c);
++ return vmladavaq_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int32x4_t b, int32x4_t c)
++foo1 (int32_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int8x16_t b, int8x16_t c)
++foo (int32_t add, int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavaq_s8 (a, b, c);
++ return vmladavaq_s8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int8x16_t b, int8x16_t c)
++foo1 (int32_t add, int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint16x8_t b, uint16x8_t c)
++foo (uint32_t add, uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmladavaq_u16 (a, b, c);
++ return vmladavaq_u16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint16x8_t b, uint16x8_t c)
++foo1 (uint32_t add, uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmladava.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint16x8_t m1, uint16x8_t m2)
++{
++ return vmladavaq (1, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint32x4_t b, uint32x4_t c)
++foo (uint32_t add, uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmladavaq_u32 (a, b, c);
++ return vmladavaq_u32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint32x4_t b, uint32x4_t c)
++foo1 (uint32_t add, uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmladava.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint32x4_t m1, uint32x4_t m2)
++{
++ return vmladavaq (1, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladava.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32_t a, uint8x16_t b, uint8x16_t c)
++foo (uint32_t add, uint8x16_t m1, uint8x16_t m2)
+ {
+- return vmladavaq_u8 (a, b, c);
++ return vmladavaq_u8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladava.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmladava.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32_t a, uint8x16_t b, uint8x16_t c)
++foo1 (uint32_t add, uint8x16_t m1, uint8x16_t m2)
+ {
+- return vmladavaq (a, b, c);
++ return vmladavaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmladava.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32_t
++foo2 (uint8x16_t m1, uint8x16_t m2)
++{
++ return vmladavaq (1, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladava.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo (int32_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p_s16 (a, b, c, p);
++ return vmladavaxq_p_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo1 (int32_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p (a, b, c, p);
++ return vmladavaxq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavaxt.s16" } } */
+-/* { dg-final { scan-assembler "vmladavaxt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo (int32_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p_s32 (a, b, c, p);
++ return vmladavaxq_p_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo1 (int32_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p (a, b, c, p);
++ return vmladavaxq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavaxt.s32" } } */
+-/* { dg-final { scan-assembler "vmladavaxt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_p_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
++foo (int32_t add, int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p_s8 (a, b, c, p);
++ return vmladavaxq_p_s8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavaxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavaxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
++foo1 (int32_t add, int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavaxq_p (a, b, c, p);
++ return vmladavaxq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavaxt.s8" } } */
+-/* { dg-final { scan-assembler "vmladavaxt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavax.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int16x8_t b, int16x8_t c)
++foo (int32_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavaxq_s16 (a, b, c);
++ return vmladavaxq_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavax.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmladavax.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int16x8_t b, int16x8_t c)
++foo1 (int32_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavaxq (a, b, c);
++ return vmladavaxq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavax.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavax.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int32x4_t b, int32x4_t c)
++foo (int32_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavaxq_s32 (a, b, c);
++ return vmladavaxq_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmladavax.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int32x4_t b, int32x4_t c)
++foo1 (int32_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavaxq (a, b, c);
++ return vmladavaxq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavax.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavax.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32_t a, int8x16_t b, int8x16_t c)
++foo (int32_t add, int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavaxq_s8 (a, b, c);
++ return vmladavaxq_s8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavax.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmladavax.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32_t a, int8x16_t b, int8x16_t c)
++foo1 (int32_t add, int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavaxq (a, b, c);
++ return vmladavaxq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavax.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_s16 (a, b, p);
++ return vmladavq_p_s16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_s32 (a, b, p);
++ return vmladavq_p_s32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
++foo (int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_s8 (a, b, p);
++ return vmladavq_p_s8 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
++foo1 (int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
++foo (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_u16 (a, b, p);
++ return vmladavq_p_u16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
++foo1 (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
++foo (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_u32 (a, b, p);
++ return vmladavq_p_u32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
++foo1 (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
++foo (uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p_u8 (a, b, p);
++ return vmladavq_p_u8 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavt.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
++foo1 (uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavq_p (a, b, p);
++ return vmladavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int16x8_t a, int16x8_t b)
++foo (int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavq_s16 (a, b);
++ return vmladavq_s16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int16x8_t a, int16x8_t b)
++foo1 (int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32x4_t a, int32x4_t b)
++foo (int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavq_s32 (a, b);
++ return vmladavq_s32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32x4_t a, int32x4_t b)
++foo1 (int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int8x16_t a, int8x16_t b)
++foo (int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavq_s8 (a, b);
++ return vmladavq_s8 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int8x16_t a, int8x16_t b)
++foo1 (int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint16x8_t a, uint16x8_t b)
++foo (uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmladavq_u16 (a, b);
++ return vmladavq_u16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.u16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint16x8_t a, uint16x8_t b)
++foo1 (uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint32x4_t a, uint32x4_t b)
++foo (uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmladavq_u32 (a, b);
++ return vmladavq_u32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.u32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint32x4_t a, uint32x4_t b)
++foo1 (uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladav.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo (uint8x16_t a, uint8x16_t b)
++foo (uint8x16_t m1, uint8x16_t m2)
+ {
+- return vmladavq_u8 (a, b);
++ return vmladavq_u8 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladav.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmladav.u8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32_t
+-foo1 (uint8x16_t a, uint8x16_t b)
++foo1 (uint8x16_t m1, uint8x16_t m2)
+ {
+- return vmladavq (a, b);
++ return vmladavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladav.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p_s16 (a, b, p);
++ return vmladavxq_p_s16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p (a, b, p);
++ return vmladavxq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavxt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p_s32 (a, b, p);
++ return vmladavxq_p_s32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p (a, b, p);
++ return vmladavxq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavxt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
++foo (int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p_s8 (a, b, p);
++ return vmladavxq_p_s8 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmladavxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmladavxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
++foo1 (int8x16_t m1, int8x16_t m2, mve_pred16_t p)
+ {
+- return vmladavxq_p (a, b, p);
++ return vmladavxq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavxt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavx.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int16x8_t a, int16x8_t b)
++foo (int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavxq_s16 (a, b);
++ return vmladavxq_s16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmladavx.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int16x8_t a, int16x8_t b)
++foo1 (int16x8_t m1, int16x8_t m2)
+ {
+- return vmladavxq (a, b);
++ return vmladavxq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavx.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavx.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int32x4_t a, int32x4_t b)
++foo (int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavxq_s32 (a, b);
++ return vmladavxq_s32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmladavx.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int32x4_t a, int32x4_t b)
++foo1 (int32x4_t m1, int32x4_t m2)
+ {
+- return vmladavxq (a, b);
++ return vmladavxq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavx.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmladavx.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo (int8x16_t a, int8x16_t b)
++foo (int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavxq_s8 (a, b);
++ return vmladavxq_s8 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmladavx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmladavx.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+-foo1 (int8x16_t a, int8x16_t b)
++foo1 (int8x16_t m1, int8x16_t m2)
+ {
+- return vmladavxq (a, b);
++ return vmladavxq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmladavx.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo (int64_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p_s16 (a, b, c, p);
++ return vmlaldavaq_p_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo1 (int64_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p (a, b, c, p);
++ return vmlaldavaq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavat.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo (int64_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p_s32 (a, b, c, p);
++ return vmlaldavaq_p_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo1 (int64_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p (a, b, c, p);
++ return vmlaldavaq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavat.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u16.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
++foo (uint64_t add, uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p_u16 (a, b, c, p);
++ return vmlaldavaq_p_u16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavat.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
++foo1 (uint64_t add, uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p (a, b, c, p);
++ return vmlaldavaq_p (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
++{
++ return vmlaldavaq_p (1, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavat.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++foo (uint64_t add, uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p_u32 (a, b, c, p);
++ return vmlaldavaq_p_u32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++foo1 (uint64_t add, uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaq_p (a, b, c, p);
++ return vmlaldavaq_p (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
++{
++ return vmlaldavaq_p (1, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavat.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldava.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int16x8_t b, int16x8_t c)
++foo (int64_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavaq_s16 (a, b, c);
++ return vmlaldavaq_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldava.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldava.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int16x8_t b, int16x8_t c)
++foo1 (int64_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavaq (a, b, c);
++ return vmlaldavaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldava.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldava.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int32x4_t b, int32x4_t c)
++foo (int64_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavaq_s32 (a, b, c);
++ return vmlaldavaq_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldava.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldava.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int32x4_t b, int32x4_t c)
++foo1 (int64_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavaq (a, b, c);
++ return vmlaldavaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldava.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldava.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint64_t a, uint16x8_t b, uint16x8_t c)
++foo (uint64_t add, uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmlaldavaq_u16 (a, b, c);
++ return vmlaldavaq_u16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldava.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldava.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint64_t a, uint16x8_t b, uint16x8_t c)
++foo1 (uint64_t add, uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmlaldavaq (a, b, c);
++ return vmlaldavaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmlaldava.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint16x8_t m1, uint16x8_t m2)
++{
++ return vmlaldavaq (1, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldava.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldava.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint64_t a, uint32x4_t b, uint32x4_t c)
++foo (uint64_t add, uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmlaldavaq_u32 (a, b, c);
++ return vmlaldavaq_u32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldava.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldava.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint64_t a, uint32x4_t b, uint32x4_t c)
++foo1 (uint64_t add, uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmlaldavaq (a, b, c);
++ return vmlaldavaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmlaldava.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t m1, uint32x4_t m2)
++{
++ return vmlaldavaq (1, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldava.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavaxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo (int64_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaxq_p_s16 (a, b, c, p);
++ return vmlaldavaxq_p_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavaxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
++foo1 (int64_t add, int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaxq_p (a, b, c, p);
++ return vmlaldavaxq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavaxt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo (int64_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaxq_p_s32 (a, b, c, p);
++ return vmlaldavaxq_p_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
++foo1 (int64_t add, int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavaxq_p (a, b, c, p);
++ return vmlaldavaxq_p (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavaxt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldavax.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int16x8_t b, int16x8_t c)
++foo (int64_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavaxq_s16 (a, b, c);
++ return vmlaldavaxq_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavax.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldavax.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int16x8_t b, int16x8_t c)
++foo1 (int64_t add, int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavaxq (a, b, c);
++ return vmlaldavaxq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavax.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldavax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int64_t a, int32x4_t b, int32x4_t c)
++foo (int64_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavaxq_s32 (a, b, c);
++ return vmlaldavaxq_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldavax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int64_t a, int32x4_t b, int32x4_t c)
++foo1 (int64_t add, int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavaxq (a, b, c);
++ return vmlaldavaxq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavax.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p_s16 (a, b, p);
++ return vmlaldavq_p_s16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p (a, b, p);
++ return vmlaldavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p_s32 (a, b, p);
++ return vmlaldavq_p_s32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p (a, b, p);
++ return vmlaldavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
++foo (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p_u16 (a, b, p);
++ return vmlaldavq_p_u16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
++foo1 (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p (a, b, p);
++ return vmlaldavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
++foo (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p_u32 (a, b, p);
++ return vmlaldavq_p_u32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavt.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
++foo1 (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavq_p (a, b, p);
++ return vmlaldavq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldav.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int16x8_t a, int16x8_t b)
++foo (int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavq_s16 (a, b);
++ return vmlaldavq_s16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldav.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldav.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int16x8_t a, int16x8_t b)
++foo1 (int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavq (a, b);
++ return vmlaldavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldav.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldav.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int32x4_t a, int32x4_t b)
++foo (int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavq_s32 (a, b);
++ return vmlaldavq_s32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldav.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldav.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int32x4_t a, int32x4_t b)
++foo1 (int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavq (a, b);
++ return vmlaldavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldav.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldav.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint16x8_t a, uint16x8_t b)
++foo (uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmlaldavq_u16 (a, b);
++ return vmlaldavq_u16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldav.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldav.u16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint16x8_t a, uint16x8_t b)
++foo1 (uint16x8_t m1, uint16x8_t m2)
+ {
+- return vmlaldavq (a, b);
++ return vmlaldavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldav.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldav.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo (uint32x4_t a, uint32x4_t b)
++foo (uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmlaldavq_u32 (a, b);
++ return vmlaldavq_u32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldav.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldav.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+-foo1 (uint32x4_t a, uint32x4_t b)
++foo1 (uint32x4_t m1, uint32x4_t m2)
+ {
+- return vmlaldavq (a, b);
++ return vmlaldavq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldav.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavxq_p_s16 (a, b, p);
++ return vmlaldavxq_p_s16 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, mve_pred16_t p)
+ {
+- return vmlaldavxq_p (a, b, p);
++ return vmlaldavxq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavxt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavxq_p_s32 (a, b, p);
++ return vmlaldavxq_p_s32 (m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlaldavxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, mve_pred16_t p)
+ {
+- return vmlaldavxq_p (a, b, p);
++ return vmlaldavxq_p (m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavxt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldavx.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int16x8_t a, int16x8_t b)
++foo (int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavxq_s16 (a, b);
++ return vmlaldavxq_s16 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldavx.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int16x8_t a, int16x8_t b)
++foo1 (int16x8_t m1, int16x8_t m2)
+ {
+- return vmlaldavxq (a, b);
++ return vmlaldavxq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavx.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaldavxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlaldavx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo (int32x4_t a, int32x4_t b)
++foo (int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavxq_s32 (a, b);
++ return vmlaldavxq_s32 (m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmlaldavx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlaldavx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+-foo1 (int32x4_t a, int32x4_t b)
++foo1 (int32x4_t m1, int32x4_t m2)
+ {
+- return vmlaldavxq (a, b);
++ return vmlaldavxq (m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlaldavx.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_s16 (a, b, c, p);
++ return vmlaq_m_n_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_s32 (a, b, c, p);
++ return vmlaq_m_n_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_s8 (a, b, c, p);
++ return vmlaq_m_n_s8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p)
++foo (uint16x8_t add, uint16x8_t m1, uint16_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_u16 (a, b, c, p);
++ return vmlaq_m_n_u16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p)
++foo1 (uint16x8_t add, uint16x8_t m1, uint16_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t add, uint16x8_t m1, mve_pred16_t p)
++{
++ return vmlaq_m (add, m1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p)
++foo (uint32x4_t add, uint32x4_t m1, uint32_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_u32 (a, b, c, p);
++ return vmlaq_m_n_u32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p)
++foo1 (uint32x4_t add, uint32x4_t m1, uint32_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t add, uint32x4_t m1, mve_pred16_t p)
++{
++ return vmlaq_m (add, m1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p)
++foo (uint8x16_t add, uint8x16_t m1, uint8_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m_n_u8 (a, b, c, p);
++ return vmlaq_m_n_u8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p)
++foo1 (uint8x16_t add, uint8x16_t m1, uint8_t m2, mve_pred16_t p)
+ {
+- return vmlaq_m (a, b, c, p);
++ return vmlaq_m (add, m1, m2, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlat.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t add, uint8x16_t m1, mve_pred16_t p)
++{
++ return vmlaq_m (add, m1, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlat.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vmlaq_n_s16 (a, b, c);
++ return vmlaq_n_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmla.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vmlaq_n_s32 (a, b, c);
++ return vmlaq_n_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmla.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vmlaq_n_s8 (a, b, c);
++ return vmlaq_n_s8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmla.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint16x8_t b, uint16_t c)
++foo (uint16x8_t add, uint16x8_t m1, uint16_t m2)
+ {
+- return vmlaq_n_u16 (a, b, c);
++ return vmlaq_n_u16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmla.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint16x8_t b, uint16_t c)
++foo1 (uint16x8_t add, uint16x8_t m1, uint16_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmla.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t add, uint16x8_t m1)
++{
++ return vmlaq (add, m1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, uint32_t c)
++foo (uint32x4_t add, uint32x4_t m1, uint32_t m2)
+ {
+- return vmlaq_n_u32 (a, b, c);
++ return vmlaq_n_u32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmla.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, uint32_t c)
++foo1 (uint32x4_t add, uint32x4_t m1, uint32_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmla.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t add, uint32x4_t m1)
++{
++ return vmlaq (add, m1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmla.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint8x16_t b, uint8_t c)
++foo (uint8x16_t add, uint8x16_t m1, uint8_t m2)
+ {
+- return vmlaq_n_u8 (a, b, c);
++ return vmlaq_n_u8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vmla.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmla.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint8x16_t b, uint8_t c)
++foo1 (uint8x16_t add, uint8x16_t m1, uint8_t m2)
+ {
+- return vmlaq (a, b, c);
++ return vmlaq (add, m1, m2);
++}
++
++/*
++**foo2:
++** ...
++** vmla.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t add, uint8x16_t m1)
++{
++ return vmlaq (add, m1, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmla.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_s16 (a, b, c, p);
++ return vmlasq_m_n_s16 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_s32 (a, b, c, p);
++ return vmlasq_m_n_s32 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_s8 (a, b, c, p);
++ return vmlasq_m_n_s8 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p)
++foo (uint16x8_t m1, uint16x8_t m2, uint16_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_u16 (a, b, c, p);
++ return vmlasq_m_n_u16 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p)
++foo1 (uint16x8_t m1, uint16x8_t m2, uint16_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t m1, uint16x8_t m2, mve_pred16_t p)
++{
++ return vmlasq_m (m1, m2, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p)
++foo (uint32x4_t m1, uint32x4_t m2, uint32_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_u32 (a, b, c, p);
++ return vmlasq_m_n_u32 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p)
++foo1 (uint32x4_t m1, uint32x4_t m2, uint32_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t m1, uint32x4_t m2, mve_pred16_t p)
++{
++ return vmlasq_m (m1, m2, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p)
++foo (uint8x16_t m1, uint8x16_t m2, uint8_t add, mve_pred16_t p)
+ {
+- return vmlasq_m_n_u8 (a, b, c, p);
++ return vmlasq_m_n_u8 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p)
++foo1 (uint8x16_t m1, uint8x16_t m2, uint8_t add, mve_pred16_t p)
+ {
+- return vmlasq_m (a, b, c, p);
++ return vmlasq_m (m1, m2, add, p);
++}
++
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlast.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t m1, uint8x16_t m2, mve_pred16_t p)
++{
++ return vmlasq_m (m1, m2, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmlast.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vmlasq_n_s16 (a, b, c);
++ return vmlasq_n_s16 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vmlasq_n_s32 (a, b, c);
++ return vmlasq_n_s32 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vmlasq_n_s8 (a, b, c);
++ return vmlasq_n_s8 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint16x8_t b, uint16_t c)
++foo (uint16x8_t m1, uint16x8_t m2, uint16_t add)
+ {
+- return vmlasq_n_u16 (a, b, c);
++ return vmlasq_n_u16 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint16x8_t b, uint16_t c)
++foo1 (uint16x8_t m1, uint16x8_t m2, uint16_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++/*
++**foo2:
++** ...
++** vmlas.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t m1, uint16x8_t m2)
++{
++ return vmlasq (m1, m2, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, uint32_t c)
++foo (uint32x4_t m1, uint32x4_t m2, uint32_t add)
+ {
+- return vmlasq_n_u32 (a, b, c);
++ return vmlasq_n_u32 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, uint32_t c)
++foo1 (uint32x4_t m1, uint32x4_t m2, uint32_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++/*
++**foo2:
++** ...
++** vmlas.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t m1, uint32x4_t m2)
++{
++ return vmlasq (m1, m2, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlas.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint8x16_t b, uint8_t c)
++foo (uint8x16_t m1, uint8x16_t m2, uint8_t add)
+ {
+- return vmlasq_n_u8 (a, b, c);
++ return vmlasq_n_u8 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vmlas.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmlas.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint8x16_t b, uint8_t c)
++foo1 (uint8x16_t m1, uint8x16_t m2, uint8_t add)
+ {
+- return vmlasq (a, b, c);
++ return vmlasq (m1, m2, add);
++}
++
++/*
++**foo2:
++** ...
++** vmlas.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t m1, uint8x16_t m2)
++{
++ return vmlasq (m1, m2, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmlas.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p_s16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s16" } } */
+-/* { dg-final { scan-assembler "vmlsdavat.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s32" } } */
+-/* { dg-final { scan-assembler "vmlsdavat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_p_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p_s8 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavat.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vmlsdavaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavat.s8" } } */
+-/* { dg-final { scan-assembler "vmlsdavat.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdava.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsdavaq_s16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdava.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsdavaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdava.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsdavaq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdava.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsdavaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdava.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vmlsdavaq_s8 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdava.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vmlsdavaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdava.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p_s16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s16" } } */
+-/* { dg-final { scan-assembler "vmlsdavaxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s32" } } */
+-/* { dg-final { scan-assembler "vmlsdavaxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_p_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p_s8 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavaxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+ {
+ return vmlsdavaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavaxt.s8" } } */
+-/* { dg-final { scan-assembler "vmlsdavaxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavax.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsdavaxq_s16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavax.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsdavaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavax.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsdavaxq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavax.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsdavaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavax.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vmlsdavaxq_s8 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavax.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32_t a, int8x16_t b, int8x16_t c)
+ {
+ return vmlsdavaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavax.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmlsdavq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmlsdavq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdav.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmlsdavq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmlsdavq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdav.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmlsdavq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmlsdavq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdav.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmlsdavq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdav.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsdavxt.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmlsdavxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavx.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmlsdavxq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavx.s16 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmlsdavxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavx.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmlsdavxq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavx.s32 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmlsdavxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsdavx.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmlsdavxq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmlsdavx.s8 (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmlsdavxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsdavx.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavat.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsldavaq_p_s16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavat.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavat.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsldavaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavat.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsldavaq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsldavaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldava.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsldavaq_s16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldava.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldava.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsldavaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldava.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldava.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsldavaq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldava.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldava.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsldavaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldava.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavaxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsldavaxq_p_s16 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavaxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavaxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+ {
+ return vmlsldavaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavaxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsldavaxq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vmlsldavaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavaxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldavax.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsldavaxq_s16 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavax.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldavax.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int16x8_t b, int16x8_t c)
+ {
+ return vmlsldavaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavax.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldavax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsldavaxq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldavax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vmlsldavaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavax.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsldavq_p_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsldavq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsldavq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsldavq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldav.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmlsldavq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldav.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldav.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmlsldavq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldav.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldav.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmlsldavq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldav.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldav.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmlsldavq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldav.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsldavxq_p_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavxt.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmlsldavxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsldavxq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmlsldavxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmlsldavxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldavx.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmlsldavxq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldavx.s16 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmlsldavxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsldavxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmlsldavx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmlsldavxq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmlsldavx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmlsldavxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmlsldavx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a)
+ {
+ return vmovlbq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmovlb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a)
+ {
+ return vmovlbq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlb.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a)
+ {
+ return vmovlbq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmovlb.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a)
+ {
+ return vmovlbq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlb.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a)
+ {
+- return vmovlbq_u16 (a);
++ return vmovlbq_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmovlb.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a)
+ {
+- return vmovlbq (a);
++ return vmovlbq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmovlb.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlb.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a)
+ {
+- return vmovlbq_u8 (a);
++ return vmovlbq_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlb.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmovlb.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a)
+ {
+- return vmovlbq (a);
++ return vmovlbq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmovlb.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovlbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovlbt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovlbq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a)
+ {
+ return vmovltq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmovlt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a)
+ {
+ return vmovltq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a)
+ {
+ return vmovltq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmovlt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a)
+ {
+ return vmovltq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a)
+ {
+- return vmovltq_u16 (a);
++ return vmovltq_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmovlt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a)
+ {
+- return vmovltq (a);
++ return vmovltq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmovlt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovlt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a)
+ {
+- return vmovltq_u8 (a);
++ return vmovltq_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmovlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmovlt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a)
+ {
+- return vmovltq (a);
++ return vmovltq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmovlt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmovltq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovltt.u8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmovltq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovnbt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnb.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vmovnbq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmovnb.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnb.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vmovnbq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmovnb.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnb.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vmovnbq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmovnb.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovnbq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnb.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vmovnbq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmovnb.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnb.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmovntq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmovntq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmovntq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmovntq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmovntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmovntt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vmovntq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmovnt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vmovntq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmovnt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vmovntq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmovnt.i16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovntq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmovnt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vmovntq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmovnt.i32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmovnt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmulhq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmulhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmulhq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulhq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulhq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmulh.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulhq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmulh.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmulh.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulht.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64x2_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint64x2_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmullbq_int_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmullbq_int_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmullbq_int_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmullbq_int_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vmullbq_int_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmullbq_int_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmullbq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_m_p16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_m_p8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_p16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmullbq_poly_p16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmullbq_poly (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.p16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_p8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullb.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmullbq_poly_p8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmullb.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmullbq_poly (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullb.p8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_x_p16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_x_p8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmullbt.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmullbt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmullbq_poly_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64x2_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint64x2_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmulltq_int_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmulltq_int_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmulltq_int_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulltq_int_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulltq_int_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulltq_int_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulltq_int (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_int_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_m_p16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_m_p8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_p16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulltq_poly_p16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulltq_poly (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.p16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_p8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmullt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulltq_poly_p8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmullt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulltq_poly (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmullt.p8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_x_p16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_x_p8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmulltt.p8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmulltt.p8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulltq_poly_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vmulq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vmulq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vmulq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
++{
++ return vmulq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vmulq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
++{
++ return vmulq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vmulq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vmulq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vmulq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vmulq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vmulq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vmulq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f16" } } */
++/*
++**foo2:
++** ...
++** vmul.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a)
++{
++ return vmulq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vmulq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_f32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vmulq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.f32" } } */
++/*
++**foo2:
++** ...
++** vmul.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a)
++{
++ return vmulq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vmulq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vmulq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vmulq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vmulq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
++/*
++**foo2:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vmulq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vmulq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
++/*
++**foo2:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vmulq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vmulq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
++/*
++**foo2:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vmulq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vmulq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vmulq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vmulq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmul.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vmulq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vmul.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vmulq_x (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vmulq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vmulq_x (a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vmulq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vmulq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vmulq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vmulq_x_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vmulq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmult.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vmulq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmult.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m_n_s16 (inactive, 2, p);
++ return vmvnq_m_n_s16 (inactive, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m (inactive, 2, p);
++ return vmvnq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m_n_s32 (inactive, 2, p);
++ return vmvnq_m_n_s32 (inactive, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m (inactive, 2, p);
++ return vmvnq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m_n_u16 (inactive, 4, p);
++ return vmvnq_m_n_u16 (inactive, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m (inactive, 4, p);
++ return vmvnq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m_n_u32 (inactive, 4, p);
++ return vmvnq_m_n_u32 (inactive, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, mve_pred16_t p)
+ {
+- return vmvnq_m (inactive, 4, p);
++ return vmvnq_m (inactive, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_u32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_s16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo ()
+ {
+ return vmvnq_n_s16 (1);
+ }
+
+-/* { dg-final { scan-assembler "vmvn.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_s32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo ()
+ {
+- return vmvnq_n_s32 (2);
++ return vmvnq_n_s32 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_u16.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo ()
+ {
+- return vmvnq_n_u16 (1);
++ return vmvnq_n_u16 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_n_u32.c
+@@ -1,13 +1,28 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo ()
+ {
+- return vmvnq_n_u32 (2);
++ return vmvnq_n_u32 (1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vmvnq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vmvnq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vmvnq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vmvnq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vmvnq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vmvnq (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vmvnq_u16 (a);
++ return vmvnq_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vmvnq (a);
++ return vmvnq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vmvnq_u32 (a);
++ return vmvnq_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+- return vmvnq (a);
++ return vmvnq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vmvnq_u8 (a);
++ return vmvnq_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vmvn" } } */
+
++/*
++**foo1:
++** ...
++** vmvn q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vmvnq (a);
++ return vmvnq (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vmvn" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (mve_pred16_t p)
+ {
+- return vmvnq_x_n_s16 (2, p);
++ return vmvnq_x_n_s16 (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (mve_pred16_t p)
+ {
+- return vmvnq_x_n_s32 (2, p);
++ return vmvnq_x_n_s32 (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (mve_pred16_t p)
+ {
+- return vmvnq_x_n_u16 (4, p);
++ return vmvnq_x_n_u16 (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c
+@@ -1,14 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (mve_pred16_t p)
+ {
+- return vmvnq_x_n_u32 (4, p);
++ return vmvnq_x_n_u32 (1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vmvnt q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vmvnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vmvnt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vneg.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vnegq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.f16" } } */
++
++/*
++**foo1:
++** ...
++** vneg.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vnegq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vneg.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vnegq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.f32" } } */
++
++/*
++**foo1:
++** ...
++** vneg.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vnegq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vnegq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vneg.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vnegq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s16" } } */
+
++/*
++**foo1:
++** ...
++** vneg.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vneg.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vnegq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s32" } } */
+
++/*
++**foo1:
++** ...
++** vneg.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vneg.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vnegq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s8" } } */
+
++/*
++**foo1:
++** ...
++** vneg.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vneg.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vnegq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vnegq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vnegq_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vnegt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vnegq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vornq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vornq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vornq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vornq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vornq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vornq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vornq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vornq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vornq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vornq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vornq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
+
++/*
++**foo1:
++** ...
++** vorn q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vornq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorn" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vornq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vornq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vornt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vornt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vornq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vorrq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vorrq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vorrq_m_n_s16 (a, 253, p);
++ return vorrq_m_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+- return vorrq_m_n (a, 253, p);
++ return vorrq_m_n (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vorrq_m_n (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_s16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vorrq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vorr.i16" } } */
++
++/*
++**foo1:
++** ...
++** vorr.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a)
++{
++ return vorrq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_s32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vorrq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vorr.i32" } } */
++
++/*
++**foo1:
++** ...
++** vorr.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a)
++{
++ return vorrq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_u16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+ return vorrq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vorr.i16" } } */
++
++/*
++**foo1:
++** ...
++** vorr.i16 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a)
++{
++ return vorrq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_n_u32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vorrq_n_u32 (a, 44);
++ return vorrq_n_u32 (a, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vorr.i32 q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32x4_t a)
++{
++ return vorrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vorr.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vorrq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vorrq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vorrq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vorrq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vorrq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vorrq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
+
++/*
++**foo1:
++** ...
++** vorr q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vorrq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vorr" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vorrt" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vorrt q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vorrq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpnot.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpnot.c
+@@ -1,21 +1,32 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpnot(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), p0(?: @.*|)
++** ...
++*/
+ mve_pred16_t
+ foo (mve_pred16_t a)
+ {
+ return vpnot (a);
+ }
+
+-/* { dg-final { scan-assembler "vpnot" } } */
+-
+-mve_pred16_t
+-foo1 (mve_pred16_t a)
+-{
+- return vpnot (a);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpnot" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_f16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vpselq_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_f32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vpselq_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vpselq_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vpselq_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t a, int64x2_t b, mve_pred16_t p)
+ {
+ return vpselq_s64 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t a, int64x2_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vpselq_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vpselq_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vpselq_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64x2_t a, uint64x2_t b, mve_pred16_t p)
+ {
+ return vpselq_u64 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo1 (uint64x2_t a, uint64x2_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c
+@@ -1,21 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vpselq_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpsel q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vpselq (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpsel" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqabsq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqabst.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqabsq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqabst.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqabsq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqabst.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqabst.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqabsq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqabs.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vqabsq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqabs.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vqabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s32.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqabs.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vqabsq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqabs.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vqabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqabs.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vqabsq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqabs.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vqabsq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqabs.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vqaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vqaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vqaddq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vqaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqaddt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqaddq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqaddq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vqaddq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vqaddq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u16" } } */
++/*
++**foo2:
++** ...
++** vqadd.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vqaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vqaddq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u32" } } */
++/*
++**foo2:
++** ...
++** vqadd.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vqaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vqaddq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u8" } } */
++/*
++**foo2:
++** ...
++** vqadd.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vqaddq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqaddq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqaddq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqaddq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vqaddq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vqaddq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vqaddq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vqaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqadd.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmladhq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmladhq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmladhq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmladhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmladhxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmladhxq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmladhxq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmladhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmladhxq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmladhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmladhx.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m_n_s16 (a, b, c, p);
++ return vqdmlahq_m_n_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m (a, b, c, p);
++ return vqdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m_n_s32 (a, b, c, p);
++ return vqdmlahq_m_n_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m (a, b, c, p);
++ return vqdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m_n_s8 (a, b, c, p);
++ return vqdmlahq_m_n_s8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlaht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vqdmlahq_m (a, b, c, p);
++ return vqdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlaht.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlah.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vqdmlahq_n_s16 (a, b, c);
++ return vqdmlahq_n_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlah.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlah.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vqdmlahq (a, b, c);
++ return vqdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlah.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlah.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vqdmlahq_n_s32 (a, b, c);
++ return vqdmlahq_n_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlah.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlah.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vqdmlahq (a, b, c);
++ return vqdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlah.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlah.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vqdmlahq_n_s8 (a, b, c);
++ return vqdmlahq_n_s8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlah.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlah.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vqdmlahq (a, b, c);
++ return vqdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlah.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m_n_s16 (a, b, c, p);
++ return vqdmlashq_m_n_s16 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m (a, b, c, p);
++ return vqdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m_n_s32 (a, b, c, p);
++ return vqdmlashq_m_n_s32 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m (a, b, c, p);
++ return vqdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m_n_s8 (a, b, c, p);
++ return vqdmlashq_m_n_s8 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlasht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vqdmlashq_m (a, b, c, p);
++ return vqdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlasht.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlash.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vqdmlashq_n_s16 (a, b, c);
++ return vqdmlashq_n_s16 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlash.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlash.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vqdmlashq (a, b, c);
++ return vqdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlash.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlash.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vqdmlashq_n_s32 (a, b, c);
++ return vqdmlashq_n_s32 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlash.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlash.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vqdmlashq (a, b, c);
++ return vqdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlash.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlashq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlash.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vqdmlashq_n_s8 (a, b, c);
++ return vqdmlashq_n_s8 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlash.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlash.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vqdmlashq (a, b, c);
++ return vqdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqdmlash.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmlsdhq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmlsdhq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmlsdhq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmlsdhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmlsdhxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmlsdhxq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmlsdhxq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmlsdhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmlsdhxq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmlsdhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmlsdhx.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqdmulhq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqdmulhq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vqdmulhq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqdmulhq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqdmulhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqdmulhq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqdmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmullbt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmullbq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullb.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqdmullbq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullb.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqdmullbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullb.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqdmullbq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullb.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqdmullbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullb.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqdmullbq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullb.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqdmullbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmullbq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullb.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqdmullbq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullb.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqdmullbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqdmulltt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqdmulltq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqdmulltq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqdmulltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqdmulltq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqdmulltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqdmulltq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqdmulltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulltq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqdmullt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqdmulltq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqdmullt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqdmulltq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqdmullt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovnbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovnbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovnbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovnbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovnbt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqmovnbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqmovnbq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnb.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vqmovnbq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnb.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vqmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnb.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqmovnbq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnb.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovnbq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnb.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vqmovnbq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnb.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vqmovnbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnb.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovntt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovntt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovntt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqmovntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqmovntq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vqmovntq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vqmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqmovntq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnt.u16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovntq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovnt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vqmovntq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovnt.u32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vqmovntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovnt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovunbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovunbq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovunbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovunbt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovunbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovunbt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovunbq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovunbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovunbt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovunbq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovunb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqmovunbq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovunb.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqmovunbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovunbq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovunb.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqmovunbq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovunb.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqmovunbq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovuntt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovuntq_m_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovuntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovuntt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqmovuntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovuntt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovuntq_m_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqmovuntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqmovuntt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqmovuntq_m (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovunt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqmovuntq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqmovunt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqmovuntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqmovuntq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqmovunt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqmovuntq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqmovunt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqmovuntq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqmovunt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqnegq_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqnegt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqnegq_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqnegt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqnegq_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqnegt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqnegt.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqnegq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqneg.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vqnegq_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqneg.s16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vqnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqneg.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vqnegq_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqneg.s32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vqnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqneg.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vqnegq_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqneg.s8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vqnegq (a);
+ }
+
+-/* { dg-final { scan-assembler "vqneg.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmladhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmladhq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmladhq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmladhq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmladhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmladhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmladhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmladhxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmladhxq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmladhxq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmladhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmladhxq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmladhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmladhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmladhx.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m_n_s16 (a, b, c, p);
++ return vqrdmlahq_m_n_s16 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m (a, b, c, p);
++ return vqrdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m_n_s32 (a, b, c, p);
++ return vqrdmlahq_m_n_s32 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m (a, b, c, p);
++ return vqrdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m_n_s8 (a, b, c, p);
++ return vqrdmlahq_m_n_s8 (add, m1, m2, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlaht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2, mve_pred16_t p)
+ {
+- return vqrdmlahq_m (a, b, c, p);
++ return vqrdmlahq_m (add, m1, m2, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlaht.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlah.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vqrdmlahq_n_s16 (a, b, c);
++ return vqrdmlahq_n_s16 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlah.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlah.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t add, int16x8_t m1, int16_t m2)
+ {
+- return vqrdmlahq (a, b, c);
++ return vqrdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlah.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlah.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vqrdmlahq_n_s32 (a, b, c);
++ return vqrdmlahq_n_s32 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlah.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlah.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t add, int32x4_t m1, int32_t m2)
+ {
+- return vqrdmlahq (a, b, c);
++ return vqrdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlah.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlah.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vqrdmlahq_n_s8 (a, b, c);
++ return vqrdmlahq_n_s8 (add, m1, m2);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlah.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlah.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t add, int8x16_t m1, int8_t m2)
+ {
+- return vqrdmlahq (a, b, c);
++ return vqrdmlahq (add, m1, m2);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlah.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m_n_s16 (a, b, c, p);
++ return vqrdmlashq_m_n_s16 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m (a, b, c, p);
++ return vqrdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m_n_s32 (a, b, c, p);
++ return vqrdmlashq_m_n_s32 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m (a, b, c, p);
++ return vqrdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m_n_s8 (a, b, c, p);
++ return vqrdmlashq_m_n_s8 (m1, m2, add, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlasht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p)
+ {
+- return vqrdmlashq_m (a, b, c, p);
++ return vqrdmlashq_m (m1, m2, add, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlasht.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlash.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, int16x8_t b, int16_t c)
++foo (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vqrdmlashq_n_s16 (a, b, c);
++ return vqrdmlashq_n_s16 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlash.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlash.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, int16x8_t b, int16_t c)
++foo1 (int16x8_t m1, int16x8_t m2, int16_t add)
+ {
+- return vqrdmlashq (a, b, c);
++ return vqrdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlash.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlash.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, int32_t c)
++foo (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vqrdmlashq_n_s32 (a, b, c);
++ return vqrdmlashq_n_s32 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlash.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlash.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, int32_t c)
++foo1 (int32x4_t m1, int32x4_t m2, int32_t add)
+ {
+- return vqrdmlashq (a, b, c);
++ return vqrdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlash.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlash.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, int8x16_t b, int8_t c)
++foo (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vqrdmlashq_n_s8 (a, b, c);
++ return vqrdmlashq_n_s8 (m1, m2, add);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlash.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlash.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, int8x16_t b, int8_t c)
++foo1 (int8x16_t m1, int8x16_t m2, int8_t add)
+ {
+- return vqrdmlashq (a, b, c);
++ return vqrdmlashq (m1, m2, add);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqrdmlash.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmlsdhq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmlsdhq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmlsdhq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmlsdhq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmlsdhxt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmlsdhxq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmlsdhxt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmlsdhxq_s16 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdhx.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b)
+ {
+ return vqrdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmlsdhxq_s32 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdhx.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b)
+ {
+ return vqrdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmlsdhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmlsdhxq_s8 (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmlsdhx.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b)
+ {
+ return vqrdmlsdhxq (inactive, a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmlsdhx.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrdmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrdmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrdmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqrdmulhq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqrdmulhq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vqrdmulhq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqrdmulhq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqrdmulhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrdmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqrdmulhq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrdmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqrdmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshlt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b)
+ {
+ return vqrshlq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqrshlq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b)
+ {
+ return vqrshlq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b)
+ {
+ return vqrshlq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b)
+ {
+ return vqrshlq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b)
+ {
+ return vqrshlq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqrshlq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqrshlq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqrshlq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vqrshlq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vqrshlq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vqrshlq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqrshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vqrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqrshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrnbt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrnbt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqrshrnbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vqrshrnbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vqrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqrshrnbq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnb.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vqrshrnbq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnb.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vqrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnb.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrntt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrntt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqrshrntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vqrshrntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vqrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqrshrntq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrntq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrnt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vqrshrntq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrnt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vqrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrnt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrunbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrunbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrunbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrunbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshrunbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrunbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrunbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrunbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrunbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshrunbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshrunbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshrunbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrunb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqrshrunbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrunb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqrshrunbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrunb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqrshrunbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrunb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqrshrunbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshruntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshruntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshruntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshruntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqrshruntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshruntt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshruntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshruntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshruntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqrshruntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqrshruntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqrshruntt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrunt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqrshruntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrunt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqrshruntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshruntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqrshrunt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqrshruntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqrshrunt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqrshruntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqrshrunt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vqshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqshlq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vqshlq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vqshlq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vqshlq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+ return vqshlq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+ return vqshlq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+ return vqshlq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+ return vqshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b)
+ {
+ return vqshlq_r_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqshlq_r_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b)
+ {
+ return vqshlq_r_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b)
+ {
+ return vqshlq_r_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b)
+ {
+ return vqshlq_r_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b)
+ {
+ return vqshlq_r_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b)
+ {
+ return vqshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqshlq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqshlq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqshlq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vqshlq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vqshlq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vqshlq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vqshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vqshluq_m_n_s16 (inactive, a, 7, p);
++ return vqshluq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlut.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vqshluq_m (inactive, a, 7, p);
++ return vqshluq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vqshluq_m_n_s32 (inactive, a, 7, p);
++ return vqshluq_m_n_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlut.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vqshluq_m (inactive, a, 7, p);
++ return vqshluq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_m_n_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vqshluq_m_n_s8 (inactive, a, 7, p);
++ return vqshluq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshlut.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshlut.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vqshluq_m (inactive, a, 7, p);
++ return vqshluq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshlu.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (int16x8_t a)
+ {
+- return vqshluq_n_s16 (a, 7);
++ return vqshluq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshlu.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshlu.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (int16x8_t a)
+ {
+- return vqshluq (a, 7);
++ return vqshluq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqshlu.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshlu.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (int32x4_t a)
+ {
+- return vqshluq_n_s32 (a, 7);
++ return vqshluq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshlu.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshlu.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (int32x4_t a)
+ {
+- return vqshluq (a, 7);
++ return vqshluq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqshlu.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshlu.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (int8x16_t a)
+ {
+- return vqshluq_n_s8 (a, 7);
++ return vqshluq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshlu.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqshlu.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (int8x16_t a)
+ {
+- return vqshluq (a, 7);
++ return vqshluq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqshlu.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vqshrnbq_m_n_s16 (a, b, 7, p);
++ return vqshrnbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vqshrnbq_m (a, b, 7, p);
++ return vqshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vqshrnbq_m_n_s32 (a, b, 11, p);
++ return vqshrnbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vqshrnbq_m (a, b, 11, p);
++ return vqshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqshrnbq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqshrnbq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrnbt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrnbt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqshrnbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+- return vqshrnbq_n_s32 (a, b, 2);
++ return vqshrnbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+- return vqshrnbq (a, b, 2);
++ return vqshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqshrnb.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqshrnbq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrnbq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnb.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+- return vqshrnbq_n_u32 (a, b, 15);
++ return vqshrnbq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnb.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnb.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+- return vqshrnbq (a, b, 15);
++ return vqshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vqshrnb.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrntt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrntt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vqshrntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vqshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vqshrntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vqshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vqshrntq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vqshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrntq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrnt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vqshrntq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrnt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vqshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrnt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrunbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshrunbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrunbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrunbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshrunbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrunbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrunbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshrunbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrunbt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshrunbt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshrunbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshrunbt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrunb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqshrunbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrunb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqshrunbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshrunbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrunb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqshrunbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunb.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrunb.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqshrunbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunb.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshruntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshruntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshruntt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshruntt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqshruntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshruntt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshruntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshruntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshruntt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqshruntt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqshruntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqshruntt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrunt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int16x8_t b)
+ {
+ return vqshruntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqshrunt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int16x8_t b)
+ {
+ return vqshruntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshruntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqshrunt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32x4_t b)
+ {
+ return vqshruntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqshrunt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32x4_t b)
+ {
+ return vqshruntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vqshrunt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vqsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vqsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vqsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vqsubt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vqsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vqsubt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vqsubq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vqsubq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vqsubq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vqsubq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u16" } } */
++/*
++**foo2:
++** ...
++** vqsub.u16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vqsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vqsubq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u32" } } */
++/*
++**foo2:
++** ...
++** vqsub.u32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vqsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vqsubq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u8" } } */
++/*
++**foo2:
++** ...
++** vqsub.u8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vqsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vqsubq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s16" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vqsubq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s32" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vqsubq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s8" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vqsubq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u16" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vqsubq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u32" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vqsub.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vqsubq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u8" } } */
+
++/*
++**foo1:
++** ...
++** vqsub.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vqsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vqsub.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev16t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev16t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev16.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vrev16q_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev16.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev16.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vrev16q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev16.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev16.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vrev16q_u8 (a);
++ return vrev16q_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev16.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev16.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vrev16q (a);
++ return vrev16q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev16.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev16t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev16t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev16t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev16q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrev32q_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.16" } } */
++
++/*
++**foo1:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrev32q (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_s16.c
+@@ -1,21 +1,41 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vrev32q_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.16" } } */
+
++/*
++**foo1:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vrev32q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev32.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vrev32q_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev32.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vrev32q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vrev32q_u16 (a);
++ return vrev32q_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.16" } } */
+
++/*
++**foo1:
++** ...
++** vrev32.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vrev32q (a);
++ return vrev32q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev32.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev32.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vrev32q_u8 (a);
++ return vrev32q_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev32.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev32.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vrev32q (a);
++ return vrev32q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev32.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev32q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev32t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev32t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev32q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrev64q_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.16" } } */
++
++/*
++**foo1:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrev64q (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrev64q_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.32" } } */
++
++/*
++**foo1:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrev64q (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16-clobber.c
+@@ -0,0 +1,17 @@
++/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-additional-options "-O2" } */
++
++#include "arm_mve.h"
++
++int16x8_t
++foo (int16x8_t a, mve_pred16_t p)
++{
++ return vrev64q_m_s16 (a, a, p);
++}
++
++float16x8_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vrev64q_m_f16 (a, a, p);
++}
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_s16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_s32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_s8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_u16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_u32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_m_u8 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vrev64q_s16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.16" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vrev64q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+ return vrev64q_s32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.32" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+ return vrev64q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vrev64q_s8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vrev64q (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vrev64q_u16 (a);
++ return vrev64q_u16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.16" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vrev64q (a);
++ return vrev64q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev64.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vrev64q_u32 (a);
++ return vrev64q_u32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.32" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+- return vrev64q (a);
++ return vrev64q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev64.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrev64.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vrev64q_u8 (a);
++ return vrev64q_u8 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrev64.8" } } */
+
++/*
++**foo1:
++** ...
++** vrev64.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vrev64q (a);
++ return vrev64q (a);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrev64.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_s16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_s32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_s8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_u16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_u32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_x_u8 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrev64t.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrev64t.8 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+ return vrev64q_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vrhaddq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s16" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrhaddq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vrhaddq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s8" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vrhaddq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u16" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vrhaddq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrhadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vrhaddq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u8" } } */
+
++/*
++**foo1:
++** ...
++** vrhadd.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vrhaddq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrhadd.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrhaddt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrhaddt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrhaddq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c
+@@ -1,21 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaq_p_u32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhat.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhat.u32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhat.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t b, uint32x4_t c, mve_pred16_t p)
++{
++ return vrmlaldavhaq_p (1, b, c, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavha.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlaldavhaq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavha.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavha.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlaldavhaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavha.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavha.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint64_t a, uint32x4_t b, uint32x4_t c)
+ {
+ return vrmlaldavhaq_u32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavha.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavha.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint64_t a, uint32x4_t b, uint32x4_t c)
+ {
+ return vrmlaldavhaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavha.u32" } } */
++/*
++**foo2:
++** ...
++** vrmlaldavha.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint64_t
++foo2 (uint32x4_t b, uint32x4_t c)
++{
++ return vrmlaldavhaq (1, b, c);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaxq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlaldavhaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhaxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavhax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlaldavhaxq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavhax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlaldavhaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhax.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavht.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavht.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavht.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhq_p_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavht.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavht.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavh.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrmlaldavhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavh.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrmlaldavhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavh.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vrmlaldavhq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavh.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavh.u32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint64_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vrmlaldavhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavh.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhxq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlaldavhxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlaldavhxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlaldavhx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrmlaldavhxq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlaldavhx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrmlaldavhxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlaldavhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlsldavhaq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhat.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhat.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlsldavhaq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhat.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlsldavha.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlsldavhaq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavha.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlsldavha.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlsldavhaq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavha.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlsldavhaxq_p_s32 (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhaxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhaxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+ {
+ return vrmlsldavhaxq_p (a, b, c, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhaxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlsldavhax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlsldavhaxq_s32 (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhax.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlsldavhax.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int64_t a, int32x4_t b, int32x4_t c)
+ {
+ return vrmlsldavhaxq (a, b, c);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhax.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavht.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlsldavhq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavht.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlsldavhq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlsldavh.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrmlsldavhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlsldavh.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrmlsldavhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhxq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhxq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlsldavhxq_p_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhxt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmlsldavhxt.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmlsldavhxq_p (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhxt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhxq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlsldavhxq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmlsldavhx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrmlsldavhxq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhx.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmlsldavhx.s32 (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int64_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrmlsldavhxq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmlsldavhx.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vrmulhq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s16" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrmulhq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vrmulhq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s8" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vrmulhq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u16" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vrmulhq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrmulh.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vrmulhq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u8" } } */
+
++/*
++**foo1:
++** ...
++** vrmulh.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vrmulhq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrmulh.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrmulht.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrmulht.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vrmulhq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrinta.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndaq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrinta.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrinta.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndaq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrinta.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndaq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrinta.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrinta.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndaq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndaq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndaq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndaq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndaq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintat.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndaq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndaq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintat.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintat.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndaq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintm.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndmq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintm.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrintm.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndmq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintm.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndmq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintm.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrintm.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndmq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndmq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndmq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndmq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndmq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintmt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndmq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndmq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintmt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintmt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndmq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintn.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndnq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintn.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrintn.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndnq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintn.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndnq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintn.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrintn.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndnq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndnq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintnt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndnq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintnt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndnq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndnq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintnt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndnq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintnt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintnt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndnq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintp.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndpq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintp.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrintp.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndpq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintp.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndpq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintp.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrintp.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndpq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndpq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndpq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndpq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndpq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintpt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndpq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndpq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintpt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintpt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndpq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintz.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintz.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrintz.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintz.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintz.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrintz.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintzt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintzt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintzt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintzt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintzt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_f16.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintx.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a)
+ {
+ return vrndxq_f16 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintx.f16" } } */
++
++/*
++**foo1:
++** ...
++** vrintx.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a)
++{
++ return vrndxq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_f32.c
+@@ -1,13 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrintx.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a)
+ {
+ return vrndxq_f32 (a);
+ }
+
+-/* { dg-final { scan-assembler "vrintx.f32" } } */
++
++/*
++**foo1:
++** ...
++** vrintx.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a)
++{
++ return vrndxq (a);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_m_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndxq_m_f16 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintxt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
+ {
+ return vrndxq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_m_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndxq_m_f32 (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintxt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
+ {
+ return vrndxq_m (inactive, a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndxq_x_f16 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintxt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f16 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, mve_pred16_t p)
+ {
+ return vrndxq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndxq_x_f32 (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrintxt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrintxt.f32 q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, mve_pred16_t p)
+ {
+ return vrndxq_x (a, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_n (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b)
+ {
+ return vrshlq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vrshlq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b)
+ {
+ return vrshlq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b)
+ {
+ return vrshlq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b)
+ {
+ return vrshlq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b)
+ {
+ return vrshlq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vrshlq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vrshlq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vrshlq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vrshlq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vrshlq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vrshlq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vrshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vrshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vrshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vrshlq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrshrnbq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vrshrnbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vrshrnbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vrshrnbq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrnbq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vrshrnbq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vrshrnbq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnb.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vrshrntq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrntt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+ return vrshrntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+ return vrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+ return vrshrntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+ return vrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+ return vrshrntq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+ return vrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrntq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+ return vrshrntq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vrshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+ return vrshrntq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshrnt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_s16 (inactive, a, 16, p);
++ return vrshrq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 16, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_s32 (inactive, a, 32, p);
++ return vrshrq_m_n_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 32, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_s8 (inactive, a, 8, p);
++ return vrshrq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 8, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_u16 (inactive, a, 16, p);
++ return vrshrq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 16, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_u32 (inactive, a, 32, p);
++ return vrshrq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 32, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_m_n_u8 (inactive, a, 8, p);
++ return vrshrq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_m (inactive, a, 8, p);
++ return vrshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+- return vrshrq_n_s16 (a, 16);
++ return vrshrq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.s16" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+- return vrshrq (a, 16);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+- return vrshrq_n_s32 (a, 32);
++ return vrshrq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.s32" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+- return vrshrq (a, 32);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+- return vrshrq_n_s8 (a, 8);
++ return vrshrq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.s8" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+- return vrshrq (a, 8);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vrshrq_n_u16 (a, 16);
++ return vrshrq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.u16" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vrshrq (a, 16);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vrshrq_n_u32 (a, 32);
++ return vrshrq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.u32" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+- return vrshrq (a, 32);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vrshr.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vrshrq_n_u8 (a, 8);
++ return vrshrq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vrshr.u8" } } */
+
++/*
++**foo1:
++** ...
++** vrshr.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vrshrq (a, 8);
++ return vrshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vrshr.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_s16 (a, 16, p);
++ return vrshrq_x_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 16, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_s32 (a, 32, p);
++ return vrshrq_x_n_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 32, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_s8 (a, 8, p);
++ return vrshrq_x_n_s8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 8, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_u16 (a, 16, p);
++ return vrshrq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 16, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_u32 (a, 32, p);
++ return vrshrq_x_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 32, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_x_n_u8 (a, 8, p);
++ return vrshrq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vrshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, mve_pred16_t p)
+ {
+- return vrshrq_x (a, 8, p);
++ return vrshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vrshrt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c
+@@ -1,23 +1,57 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vsbciq_m_s32 (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbcit.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vsbciq_m (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbcit.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c
+@@ -1,23 +1,57 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vsbciq_m_u32 (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbcit.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbcit.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p)
++foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p)
+ {
+ return vsbciq_m (inactive, a, b, carry_out, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbcit.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsbci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, unsigned * carry_out)
++foo (int32x4_t a, int32x4_t b, unsigned *carry_out)
+ {
+ return vsbciq_s32 (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vsbci.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsbci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, unsigned * carry_out)
++foo1 (int32x4_t a, int32x4_t b, unsigned *carry_out)
+ {
+- return vsbciq_s32 (a, b, carry_out);
++ return vsbciq (a, b, carry_out);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsbci.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsbci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out)
++foo (uint32x4_t a, uint32x4_t b, unsigned *carry_out)
+ {
+ return vsbciq_u32 (a, b, carry_out);
+ }
+
+-/* { dg-final { scan-assembler "vsbci.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsbci.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out)
++foo1 (uint32x4_t a, uint32x4_t b, unsigned *carry_out)
+ {
+- return vsbciq_u32 (a, b, carry_out);
++ return vsbciq (a, b, carry_out);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsbci.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c
+@@ -1,23 +1,77 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p)
++foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+- return vsbcq_m_s32 (inactive, a, b, carry, p);
++ return vsbcq_m_s32 (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbct.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p)
++foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+- return vsbcq_m (inactive, a, b, carry, p);
++ return vsbcq_m (inactive, a, b, carry, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbct.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c
+@@ -1,22 +1,77 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p)
++foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+- return vsbcq_m_u32 (inactive, a, b, carry, p);
++ return vsbcq_m_u32 (inactive, a, b, carry, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbct.i32" } } */
++
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsbct.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p)
++foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p)
+ {
+- return vsbcq_m (inactive, a, b, carry, p);
++ return vsbcq_m (inactive, a, b, carry, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsbct.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c
+@@ -1,21 +1,69 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vsbc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, int32x4_t b, unsigned * carry)
++foo (int32x4_t a, int32x4_t b, unsigned *carry)
+ {
+ return vsbcq_s32 (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vsbc.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vsbc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, int32x4_t b, unsigned * carry)
++foo1 (int32x4_t a, int32x4_t b, unsigned *carry)
+ {
+ return vsbcq (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vsbc.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c
+@@ -1,21 +1,69 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vsbc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32x4_t b, unsigned * carry)
++foo (uint32x4_t a, uint32x4_t b, unsigned *carry)
+ {
+ return vsbcq_u32 (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vsbc.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** lsls (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29(?: @.*|)
++** ...
++** and (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #536870912(?: @.*|)
++** ...
++** orrs (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vsbc.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
++** ...
++** ubfx (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry)
++foo1 (uint32x4_t a, uint32x4_t b, unsigned *carry)
+ {
+ return vsbcq (a, b, carry);
+ }
+
+-/* { dg-final { scan-assembler "vsbc.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f16-1.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo (float16_t a, float16x8_t b)
+-{
+- return vsetq_lane (23.26, b, 0);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f16.c
+@@ -1,15 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16_t a, float16x8_t b)
+ {
+- return vsetq_lane_f16 (a, b, 0);
++ return vsetq_lane_f16 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16_t a, float16x8_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.16" } } */
++/*
++**foo2:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t b)
++{
++ return vsetq_lane (1.1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f32-1.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo (float32_t a, float32x4_t b)
+-{
+- return vsetq_lane (23.34, b, 0);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_f32.c
+@@ -1,15 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32_t a, float32x4_t b)
+ {
+- return vsetq_lane_f32 (a, b, 0);
++ return vsetq_lane_f32 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32_t a, float32x4_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.32" } } */
++/*
++**foo2:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t b)
++{
++ return vsetq_lane (1.1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s16.c
+@@ -1,15 +1,41 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16_t a, int16x8_t b)
+ {
+- return vsetq_lane_s16 (a, b, 0);
++ return vsetq_lane_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.16" } } */
+
++/*
++**foo1:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16_t a, int16x8_t b)
++{
++ return vsetq_lane (a, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s32.c
+@@ -1,15 +1,41 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32_t a, int32x4_t b)
+ {
+- return vsetq_lane_s32 (a, b, 0);
++ return vsetq_lane_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.32" } } */
+
++/*
++**foo1:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32_t a, int32x4_t b)
++{
++ return vsetq_lane (a, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s64.c
+@@ -1,16 +1,41 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+-/* { dg-require-effective-target arm_hard_ok } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-mfloat-abi=hard -O2" } */
++/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov d[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int64x2_t
+ foo (int64_t a, int64x2_t b)
+ {
+- return vsetq_lane_s64 (a, b, 0);
++ return vsetq_lane_s64 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler {vmov\td0, r[1-9]*[0-9], r[1-9]*[0-9]} } } */
+
++/*
++**foo1:
++** ...
++** vmov d[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int64x2_t
++foo1 (int64_t a, int64x2_t b)
++{
++ return vsetq_lane (a, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_s8.c
+@@ -1,15 +1,41 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.8 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8_t a, int8x16_t b)
+ {
+- return vsetq_lane_s8 (a, b, 0);
++ return vsetq_lane_s8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.8" } } */
+
++/*
++**foo1:
++** ...
++** vmov.8 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8_t a, int8x16_t b)
++{
++ return vsetq_lane (a, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u16.c
+@@ -1,15 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16_t a, uint16x8_t b)
+ {
+- return vsetq_lane_u16 (a, b, 0);
++ return vsetq_lane_u16 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16_t a, uint16x8_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.16" } } */
++/*
++**foo2:
++** ...
++** vmov.16 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t b)
++{
++ return vsetq_lane (1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u32.c
+@@ -1,15 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32_t a, uint32x4_t b)
+ {
+- return vsetq_lane_u32 (a, b, 0);
++ return vsetq_lane_u32 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32_t a, uint32x4_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.32" } } */
++/*
++**foo2:
++** ...
++** vmov.32 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t b)
++{
++ return vsetq_lane (1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u64.c
+@@ -1,16 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+-/* { dg-require-effective-target arm_hard_ok } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+-/* { dg-additional-options "-mfloat-abi=hard -O2" } */
++/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov d[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint64x2_t
+ foo (uint64_t a, uint64x2_t b)
+ {
+- return vsetq_lane_u64 (a, b, 0);
++ return vsetq_lane_u64 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov d[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint64x2_t
++foo1 (uint64_t a, uint64x2_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler {vmov\td0, r[1-9]*[0-9], r[1-9]*[0-9]} } } */
++/*
++**foo2:
++** ...
++** vmov d[0-9]+, (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint64x2_t
++foo2 (uint64x2_t b)
++{
++ return vsetq_lane (1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_u8.c
+@@ -1,15 +1,53 @@
+-/* { dg-skip-if "Incompatible float ABI" { *-*-* } { "-mfloat-abi=soft" } {""} } */
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmov.8 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8_t a, uint8x16_t b)
+ {
+- return vsetq_lane_u8 (a, b, 0);
++ return vsetq_lane_u8 (a, b, 1);
++}
++
++
++/*
++**foo1:
++** ...
++** vmov.8 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8_t a, uint8x16_t b)
++{
++ return vsetq_lane (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vmov.8" } } */
++/*
++**foo2:
++** ...
++** vmov.8 q[0-9]+\[1\], (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t b)
++{
++ return vsetq_lane (1, b, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, uint32_t * b, mve_pred16_t p)
++foo (int16x8_t a, uint32_t *b, mve_pred16_t p)
+ {
+- return vshlcq_m_s16 (a, b, 32, p);
++ return vshlcq_m_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, uint32_t * b, mve_pred16_t p)
++foo1 (int16x8_t a, uint32_t *b, mve_pred16_t p)
+ {
+- return vshlcq_m (a, b, 32, p);
++ return vshlcq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, uint32_t * b, mve_pred16_t p)
++foo (int32x4_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, uint32_t * b, mve_pred16_t p)
++foo1 (int32x4_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, uint32_t * b, mve_pred16_t p)
++foo (int8x16_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m_s8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, uint32_t * b, mve_pred16_t p)
++foo1 (int8x16_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint32_t * b, mve_pred16_t p)
++foo (uint16x8_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint32_t * b, mve_pred16_t p)
++foo1 (uint16x8_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32_t * b, mve_pred16_t p)
++foo (uint32x4_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32_t * b, mve_pred16_t p)
++foo1 (uint32x4_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint32_t * b, mve_pred16_t p)
++foo (uint8x16_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlct q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint32_t * b, mve_pred16_t p)
++foo1 (uint8x16_t a, uint32_t *b, mve_pred16_t p)
+ {
+ return vshlcq_m (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlct" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo (int16x8_t a, uint32_t * b)
++foo (int16x8_t a, uint32_t *b)
+ {
+ return vshlcq_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+-foo1 (int16x8_t a, uint32_t * b)
++foo1 (int16x8_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo (int32x4_t a, uint32_t * b)
++foo (int32x4_t a, uint32_t *b)
+ {
+ return vshlcq_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+-foo1 (int32x4_t a, uint32_t * b)
++foo1 (int32x4_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo (int8x16_t a, uint32_t * b)
++foo (int8x16_t a, uint32_t *b)
+ {
+ return vshlcq_s8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+-foo1 (int8x16_t a, uint32_t * b)
++foo1 (int8x16_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo (uint16x8_t a, uint32_t * b)
++foo (uint16x8_t a, uint32_t *b)
+ {
+ return vshlcq_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+-foo1 (uint16x8_t a, uint32_t * b)
++foo1 (uint16x8_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo (uint32x4_t a, uint32_t * b)
++foo (uint32x4_t a, uint32_t *b)
+ {
+ return vshlcq_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+-foo1 (uint32x4_t a, uint32_t * b)
++foo1 (uint32x4_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo (uint8x16_t a, uint32_t * b)
++foo (uint8x16_t a, uint32_t *b)
+ {
+ return vshlcq_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
+
++/*
++**foo1:
++** ...
++** vshlc q[0-9]+, (?:ip|fp|r[0-9]+), #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+-foo1 (uint8x16_t a, uint32_t * b)
++foo1 (uint8x16_t a, uint32_t *b)
+ {
+ return vshlcq (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshlc" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshllbq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshllbq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshllbq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshllbq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshllbq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshllbq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshllbq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshllbq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a)
+ {
+ return vshllbq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshllb.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a)
+ {
+ return vshllbq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllb.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a)
+ {
+ return vshllbq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshllb.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a)
+ {
+ return vshllbq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a)
+ {
+ return vshllbq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshllb.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a)
+ {
+ return vshllbq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllb.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a)
+ {
+ return vshllbq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshllb.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a)
+ {
+ return vshllbq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllb.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vshllbq_x_n_s16 (a, 1, p);
++ return vshllbq_x_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int16x8_t a, mve_pred16_t p)
++{
++ return vshllbq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+- return vshllbq_x_n_s8 (a, 1, p);
++ return vshllbq_x_n_s8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int8x16_t a, mve_pred16_t p)
++{
++ return vshllbq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+- return vshllbq_x_n_u16 (a, 1, p);
++ return vshllbq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint16x8_t a, mve_pred16_t p)
++{
++ return vshllbq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+- return vshllbq_x_n_u8 (a, 1, p);
++ return vshllbq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshllbt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshllbt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint8x16_t a, mve_pred16_t p)
++{
++ return vshllbq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshlltq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshlltq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshlltq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshlltq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshlltq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshlltq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshlltq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshlltq_m (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a)
+ {
+ return vshlltq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshllt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int16x8_t a)
+ {
+ return vshlltq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a)
+ {
+ return vshlltq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshllt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int8x16_t a)
+ {
+ return vshlltq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a)
+ {
+ return vshlltq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshllt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint16x8_t a)
+ {
+ return vshlltq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshllt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a)
+ {
+ return vshlltq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshllt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint8x16_t a)
+ {
+ return vshlltq (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshllt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vshlltq_x_n_s16 (a, 1, p);
++ return vshlltq_x_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int16x8_t a, mve_pred16_t p)
++{
++ return vshlltq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+- return vshlltq_x_n_s8 (a, 1, p);
++ return vshlltq_x_n_s8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int8x16_t a, mve_pred16_t p)
++{
++ return vshlltq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+- return vshlltq_x_n_u16 (a, 1, p);
++ return vshlltq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint16x8_t a, mve_pred16_t p)
++{
++ return vshlltq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+- return vshlltq_x_n_u8 (a, 1, p);
++ return vshlltq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlltt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlltt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint8x16_t a, mve_pred16_t p)
++{
++ return vshlltq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+ return vshlq_m_n (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+ {
+ return vshlq_m_r (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vshlq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vshlq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vshlq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vshlq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vshlq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vshlq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vshlq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+ return vshlq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+ return vshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+- return vshlq_n_s32 (a, 16);
++ return vshlq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+- return vshlq_n (a, 16);
++ return vshlq_n (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+ return vshlq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+ return vshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vshlq_n_u16 (a, 11);
++ return vshlq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vshlq_n (a, 11);
++ return vshlq_n (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+ return vshlq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+ return vshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+ return vshlq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+ return vshlq_n (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32_t b)
+ {
+ return vshlq_r_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vshlq_r_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int32_t b)
+ {
+ return vshlq_r_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int32_t b)
+ {
+ return vshlq_r_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u16 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32_t b)
+ {
+ return vshlq_r_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u32 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int32_t b)
+ {
+ return vshlq_r_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u8 q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int32_t b)
+ {
+ return vshlq_r (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vshlq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vshlq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vshlq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.s8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b)
+ {
+ return vshlq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, int16x8_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b)
+ {
+ return vshlq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, int32x4_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b)
+ {
+ return vshlq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshl.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, int8x16_t b)
+ {
+ return vshlq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vshl.u8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_s16 (a, 1, p);
++ return vshlq_x_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_s32 (a, 1, p);
++ return vshlq_x_n_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_s8 (a, 1, p);
++ return vshlq_x_n_s8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8x16_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_u16 (a, 1, p);
++ return vshlq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_u32 (a, 1, p);
++ return vshlq_x_n_u32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32x4_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+- return vshlq_x_n_u8 (a, 1, p);
++ return vshlq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8x16_t a, mve_pred16_t p)
++{
++ return vshlq_x_n (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshlq_x_s16 (a, b, p);
++ return vshlq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshlq_x_s32 (a, b, p);
++ return vshlq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vshlq_x_s8 (a, b, p);
++ return vshlq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.s8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshlq_x_u16 (a, b, p);
++ return vshlq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshlq_x_u32 (a, b, p);
++ return vshlq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vshlq_x_u8 (a, b, p);
++ return vshlq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshlt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshlt.u8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p)
++{
++ return vshlq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m_n_s16 (a, b, 8, p);
++ return vshrnbq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m (a, b, 8, p);
++ return vshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m_n_s32 (a, b, 16, p);
++ return vshrnbq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m (a, b, 16, p);
++ return vshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m_n_u16 (a, b, 8, p);
++ return vshrnbq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m (a, b, 8, p);
++ return vshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m_n_u32 (a, b, 16, p);
++ return vshrnbq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrnbt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vshrnbq_m (a, b, 16, p);
++ return vshrnbq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrnbt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+- return vshrnbq_n_s16 (a, b, 8);
++ return vshrnbq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+- return vshrnbq (a, b, 8);
++ return vshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnb.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+- return vshrnbq_n_s32 (a, b, 16);
++ return vshrnbq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+- return vshrnbq (a, b, 16);
++ return vshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnb.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+- return vshrnbq_n_u16 (a, b, 8);
++ return vshrnbq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnb.i16" } } */
+
++/*
++**foo1:
++** ...
++** vshrnb.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+- return vshrnbq (a, b, 8);
++ return vshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnb.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrnbq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+- return vshrnbq_n_u32 (a, b, 16);
++ return vshrnbq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnb.i32" } } */
+
++/*
++**foo1:
++** ...
++** vshrnb.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+- return vshrnbq (a, b, 16);
++ return vshrnbq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnb.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshrntq_m_n_s16 (a, b, 8, p);
++ return vshrntq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vshrntq_m (a, b, 8, p);
++ return vshrntq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshrntq_m_n_s32 (a, b, 16, p);
++ return vshrntq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vshrntq_m (a, b, 16, p);
++ return vshrntq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vshrntq_m_n_u16 (a, b, 8, p);
++ return vshrntq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vshrntq_m (a, b, 8, p);
++ return vshrntq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vshrntq_m_n_u32 (a, b, 16, p);
++ return vshrntq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrntt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vshrntq_m (a, b, 16, p);
++ return vshrntq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrntt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int16x8_t b)
+ {
+- return vshrntq_n_s16 (a, b, 8);
++ return vshrntq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int16x8_t b)
+ {
+- return vshrntq (a, b, 8);
++ return vshrntq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int32x4_t b)
+ {
+- return vshrntq_n_s32 (a, b, 16);
++ return vshrntq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int32x4_t b)
+ {
+- return vshrntq (a, b, 16);
++ return vshrntq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint16x8_t b)
+ {
+- return vshrntq_n_u16 (a, b, 8);
++ return vshrntq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vshrnt.i16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint16x8_t b)
+ {
+- return vshrntq (a, b, 8);
++ return vshrntq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnt.i16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrntq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint32x4_t b)
+ {
+- return vshrntq_n_u32 (a, b, 16);
++ return vshrntq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshrnt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vshrnt.i32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint32x4_t b)
+ {
+- return vshrntq (a, b, 16);
++ return vshrntq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshrnt.i32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_s16 (inactive, a, 16, p);
++ return vshrq_m_n_s16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 16, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_s32 (inactive, a, 32, p);
++ return vshrq_m_n_s32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 32, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_s8 (inactive, a, 8, p);
++ return vshrq_m_n_s8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 8, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_u16 (inactive, a, 16, p);
++ return vshrq_m_n_u16 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 16, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_u32 (inactive, a, 32, p);
++ return vshrq_m_n_u32 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 32, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_m_n_u8 (inactive, a, 8, p);
++ return vshrq_m_n_u8 (inactive, a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_m (inactive, a, 8, p);
++ return vshrq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a)
+ {
+- return vshrq_n_s16 (a, 16);
++ return vshrq_n_s16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.s16" } } */
+
++/*
++**foo1:
++** ...
++** vshr.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a)
+ {
+- return vshrq (a, 16);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.s16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a)
+ {
+- return vshrq_n_s32 (a, 32);
++ return vshrq_n_s32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.s32" } } */
+
++/*
++**foo1:
++** ...
++** vshr.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a)
+ {
+- return vshrq (a, 32);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.s32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a)
+ {
+- return vshrq_n_s8 (a, 8);
++ return vshrq_n_s8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.s8" } } */
+
++/*
++**foo1:
++** ...
++** vshr.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a)
+ {
+- return vshrq (a, 8);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.s8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a)
+ {
+- return vshrq_n_u16 (a, 16);
++ return vshrq_n_u16 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.u16" } } */
+
++/*
++**foo1:
++** ...
++** vshr.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a)
+ {
+- return vshrq (a, 16);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.u16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a)
+ {
+- return vshrq_n_u32 (a, 32);
++ return vshrq_n_u32 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.u32" } } */
+
++/*
++**foo1:
++** ...
++** vshr.u32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a)
+ {
+- return vshrq (a, 32);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vshr.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a)
+ {
+- return vshrq_n_u8 (a, 8);
++ return vshrq_n_u8 (a, 1);
+ }
+
+-/* { dg-final { scan-assembler "vshr.u8" } } */
+
++/*
++**foo1:
++** ...
++** vshr.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a)
+ {
+- return vshrq (a, 8);
++ return vshrq (a, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vshr.u8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_x_n_s16 (a, 16, p);
++ return vshrq_x_n_s16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a, mve_pred16_t p)
++{
++ return vshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, mve_pred16_t p)
+ {
+- return vshrq_x_n_s32 (a, 32, p);
++ return vshrq_x_n_s32 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a, mve_pred16_t p)
++{
++ return vshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_x_n_s8 (a, 8, p);
++ return vshrq_x_n_s8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.s8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.s8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8x16_t a, mve_pred16_t p)
++{
++ return vshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, mve_pred16_t p)
+ {
+- return vshrq_x_n_u16 (a, 16, p);
++ return vshrq_x_n_u16 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a, mve_pred16_t p)
++{
++ return vshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, mve_pred16_t p)
+ {
+- return vshrq_x_n_u8 (a, 8, p);
++ return vshrq_x_n_u8 (a, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vshrt.u8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vshrt.u8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8x16_t a, mve_pred16_t p)
++{
++ return vshrq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_s16 (a, b, 15, p);
++ return vsliq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 15, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_s32 (a, b, 31, p);
++ return vsliq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 31, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_s8 (a, b, 7, p);
++ return vsliq_m_n_s8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 7, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_u16 (a, b, 15, p);
++ return vsliq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 15, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_u32 (a, b, 31, p);
++ return vsliq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 31, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_m_n_u8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+- return vsliq_m_n_u8 (a, b, 7, p);
++ return vsliq_m_n_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vslit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+- return vsliq_m (a, b, 7, p);
++ return vsliq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vslit.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+- return vsliq_n_s16 (a, b, 15);
++ return vsliq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.16" } } */
+
++/*
++**foo1:
++** ...
++** vsli.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+- return vsliq (a, b, 15);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+- return vsliq_n_s32 (a, b, 31);
++ return vsliq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.32" } } */
+
++/*
++**foo1:
++** ...
++** vsli.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+- return vsliq (a, b, 31);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+- return vsliq_n_s8 (a, b, 7);
++ return vsliq_n_s8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.8" } } */
+
++/*
++**foo1:
++** ...
++** vsli.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+- return vsliq (a, b, 7);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+- return vsliq_n_u16 (a, b, 15);
++ return vsliq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.16" } } */
+
++/*
++**foo1:
++** ...
++** vsli.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+- return vsliq (a, b, 15);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+- return vsliq_n_u32 (a, b, 31);
++ return vsliq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.32" } } */
+
++/*
++**foo1:
++** ...
++** vsli.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+- return vsliq (a, b, 31);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsli.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+- return vsliq_n_u8 (a, b, 7);
++ return vsliq_n_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsli.8" } } */
+
++/*
++**foo1:
++** ...
++** vsli.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+- return vsliq (a, b, 7);
++ return vsliq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsli.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_s16 (a, b, 4, p);
++ return vsriq_m_n_s16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 4, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_s32 (a, b, 2, p);
++ return vsriq_m_n_s32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 2, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_s8 (a, b, 4, p);
++ return vsriq_m_n_s8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 4, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_u16 (a, b, 4, p);
++ return vsriq_m_n_u16 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 4, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_u32 (a, b, 4, p);
++ return vsriq_m_n_u32 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 4, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_m_n_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+- return vsriq_m_n_u8 (a, b, 4, p);
++ return vsriq_m_n_u8 (a, b, 1, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsrit.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsrit.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+- return vsriq_m (a, b, 4, p);
++ return vsriq_m (a, b, 1, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vpst" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+- return vsriq_n_s16 (a, b, 4);
++ return vsriq_n_s16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.16" } } */
+
++/*
++**foo1:
++** ...
++** vsri.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+- return vsriq_n_s32 (a, b, 4);
++ return vsriq_n_s32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.32" } } */
+
++/*
++**foo1:
++** ...
++** vsri.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+- return vsriq_n_s8 (a, b, 4);
++ return vsriq_n_s8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.8" } } */
+
++/*
++**foo1:
++** ...
++** vsri.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+- return vsriq_n_u16 (a, b, 4);
++ return vsriq_n_u16 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.16" } } */
+
++/*
++**foo1:
++** ...
++** vsri.16 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+- return vsriq_n_u32 (a, b, 4);
++ return vsriq_n_u32 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.32" } } */
+
++/*
++**foo1:
++** ...
++** vsri.32 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsri.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+- return vsriq_n_u8 (a, b, 4);
++ return vsriq_n_u8 (a, b, 1);
+ }
+
+-/* { dg-final { scan-assembler "vsri.8" } } */
+
++/*
++**foo1:
++** ...
++** vsri.8 q[0-9]+, q[0-9]+, #[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+- return vsriq (a, b, 4);
++ return vsriq (a, b, 1);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vsri.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_f16.c
+@@ -1,25 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-void
+-foo (float16_t * addr, float16x8_t value)
+-{
+- vst1q_f16 (addr, value);
+-}
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8_t value)
++foo (float16_t *base, float16x8_t value)
+ {
+- vst1q (addr, value);
++ return vst1q_f16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler-times "vstrh.16" 2 } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo2 (float16_t a, float16x8_t x)
++foo1 (float16_t *base, float16x8_t value)
+ {
+- vst1q (&a, x);
++ return vst1q (base, value);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4_t value)
++foo (float32_t *base, float32x4_t value)
+ {
+- vst1q_f32 (addr, value);
++ return vst1q_f32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4_t value)
++foo1 (float32_t *base, float32x4_t value)
+ {
+- vst1q (addr, value);
++ return vst1q (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * addr, float16x8_t value, mve_pred16_t p)
++foo (float16_t *base, float16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p_f16 (addr, value, p);
++ return vst1q_p_f16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8_t value, mve_pred16_t p)
++foo1 (float16_t *base, float16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4_t value, mve_pred16_t p)
++foo (float32_t *base, float32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p_f32 (addr, value, p);
++ return vst1q_p_f32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4_t value, mve_pred16_t p)
++foo1 (float32_t *base, float32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int16x8_t value, mve_pred16_t p)
++foo (int16_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p_s16 (addr, value, p);
++ return vst1q_p_s16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8_t value, mve_pred16_t p)
++foo1 (int16_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4_t value, mve_pred16_t p)
++foo (int32_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p_s32 (addr, value, p);
++ return vst1q_p_s32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4_t value, mve_pred16_t p)
++foo1 (int32_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int8x16_t value, mve_pred16_t p)
++foo (int8_t *base, int8x16_t value, mve_pred16_t p)
+ {
+- vst1q_p_s8 (addr, value, p);
++ return vst1q_p_s8 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16_t value, mve_pred16_t p)
++foo1 (int8_t *base, int8x16_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
++foo (uint16_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p_u16 (addr, value, p);
++ return vst1q_p_u16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
++foo (uint32_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p_u32 (addr, value, p);
++ return vst1q_p_u32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
++foo1 (uint32_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_p_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint8x16_t value, mve_pred16_t p)
++foo (uint8_t *base, uint8x16_t value, mve_pred16_t p)
+ {
+- vst1q_p_u8 (addr, value, p);
++ return vst1q_p_u8 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint8x16_t value, mve_pred16_t p)
+ {
+- vst1q_p (addr, value, p);
++ return vst1q_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s16.c
+@@ -1,25 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-void
+-foo (int16_t * addr, int16x8_t value)
+-{
+- vst1q_s16 (addr, value);
+-}
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8_t value)
++foo (int16_t *base, int16x8_t value)
+ {
+- vst1q (addr, value);
++ return vst1q_s16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler-times "vstrh.16" 2 } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo2 (int16_t a, int16x8_t x)
++foo1 (int16_t *base, int16x8_t value)
+ {
+- vst1q (&a, x);
++ return vst1q (base, value);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4_t value)
++foo (int32_t *base, int32x4_t value)
+ {
+- vst1q_s32 (addr, value);
++ return vst1q_s32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4_t value)
++foo1 (int32_t *base, int32x4_t value)
+ {
+- vst1q (addr, value);
++ return vst1q (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_s8.c
+@@ -1,25 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-void
+-foo (int8_t * addr, int8x16_t value)
+-{
+- vst1q_s8 (addr, value);
+-}
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16_t value)
++foo (int8_t *base, int8x16_t value)
+ {
+- vst1q (addr, value);
++ return vst1q_s8 (base, value);
+ }
+
+-/* { dg-final { scan-assembler-times "vstrb.8" 2 } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo2 (int8_t a, int8x16_t x)
++foo1 (int8_t *base, int8x16_t value)
+ {
+- vst1q (&a, x);
++ return vst1q (base, value);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u16.c
+@@ -1,25 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-void
+-foo (uint16_t * addr, uint16x8_t value)
+-{
+- vst1q_u16 (addr, value);
+-}
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8_t value)
++foo (uint16_t *base, uint16x8_t value)
+ {
+- vst1q (addr, value);
++ return vst1q_u16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler-times "vstrh.16" 2 } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo2 (uint16_t a, uint16x8_t x)
++foo1 (uint16_t *base, uint16x8_t value)
+ {
+- vst1q (&a, x);
++ return vst1q (base, value);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4_t value)
++foo (uint32_t *base, uint32x4_t value)
+ {
+- vst1q_u32 (addr, value);
++ return vst1q_u32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4_t value)
++foo1 (uint32_t *base, uint32x4_t value)
+ {
+- vst1q (addr, value);
++ return vst1q (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst1q_u8.c
+@@ -1,25 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
+-void
+-foo (uint8_t * addr, uint8x16_t value)
+-{
+- vst1q_u8 (addr, value);
+-}
++#ifdef __cplusplus
++extern "C" {
++#endif
+
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16_t value)
++foo (uint8_t *base, uint8x16_t value)
+ {
+- vst1q (addr, value);
++ return vst1q_u8 (base, value);
+ }
+
+-/* { dg-final { scan-assembler-times "vstrb.8" 2 } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo2 (uint8_t a, uint8x16_t x)
++foo1 (uint8_t *base, uint8x16_t value)
+ {
+- vst1q (&a, x);
++ return vst1q (base, value);
+ }
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_f16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * addr, float16x8x2_t value)
++foo (float16_t *addr, float16x8x2_t value)
+ {
+- vst2q_f16 (addr, value);
++ return vst2q_f16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
+-/* { dg-final { scan-assembler "vst21.16" } } */
+
++/*
++**foo1:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8x2_t value)
++foo1 (float16_t *addr, float16x8x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_f32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4x2_t value)
++foo (float32_t *addr, float32x4x2_t value)
+ {
+- vst2q_f32 (addr, value);
++ return vst2q_f32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
+-/* { dg-final { scan-assembler "vst21.32" } } */
+
++/*
++**foo1:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4x2_t value)
++foo1 (float32_t *addr, float32x4x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int16x8x2_t value)
++foo (int16_t *addr, int16x8x2_t value)
+ {
+- vst2q_s16 (addr, value);
++ return vst2q_s16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
+-/* { dg-final { scan-assembler "vst21.16" } } */
+
++/*
++**foo1:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8x2_t value)
++foo1 (int16_t *addr, int16x8x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4x2_t value)
++foo (int32_t *addr, int32x4x2_t value)
+ {
+- vst2q_s32 (addr, value);
++ return vst2q_s32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
+-/* { dg-final { scan-assembler "vst21.32" } } */
+
++/*
++**foo1:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4x2_t value)
++foo1 (int32_t *addr, int32x4x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_s8.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int8x16x2_t value)
++foo (int8_t *addr, int8x16x2_t value)
+ {
+- vst2q_s8 (addr, value);
++ return vst2q_s8 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.8" } } */
+-/* { dg-final { scan-assembler "vst21.8" } } */
+
++/*
++**foo1:
++** ...
++** vst20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16x2_t value)
++foo1 (int8_t *addr, int8x16x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u16.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint16x8x2_t value)
++foo (uint16_t *addr, uint16x8x2_t value)
+ {
+- vst2q_u16 (addr, value);
++ return vst2q_u16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
+-/* { dg-final { scan-assembler "vst21.16" } } */
+
++/*
++**foo1:
++** ...
++** vst20.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.16 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8x2_t value)
++foo1 (uint16_t *addr, uint16x8x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u32.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4x2_t value)
++foo (uint32_t *addr, uint32x4x2_t value)
+ {
+- vst2q_u32 (addr, value);
++ return vst2q_u32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
+-/* { dg-final { scan-assembler "vst21.32" } } */
+
++/*
++**foo1:
++** ...
++** vst20.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.32 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4x2_t value)
++foo1 (uint32_t *addr, uint32x4x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst2q_u8.c
+@@ -1,22 +1,45 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint8x16x2_t value)
++foo (uint8_t *addr, uint8x16x2_t value)
+ {
+- vst2q_u8 (addr, value);
++ return vst2q_u8 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst20.8" } } */
+-/* { dg-final { scan-assembler "vst21.8" } } */
+
++/*
++**foo1:
++** ...
++** vst20.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++** vst21.8 {q[0-9]+, q[0-9]+}, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16x2_t value)
++foo1 (uint8_t *addr, uint8x16x2_t value)
+ {
+- vst2q (addr, value);
++ return vst2q (addr, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vst20.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_f16.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (float16_t * addr, float16x8x4_t value)
++foo (float16_t *addr, float16x8x4_t value)
+ {
+- vst4q_f16 (addr, value);
++ return vst4q_f16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+
++/*
++**foo1:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8x4_t value)
++foo1 (float16_t *addr, float16x8x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+-
+-void
+-foo2 (float16_t * addr, float16x8x4_t value)
+-{
+- vst4q_f16 (addr, value);
+- addr += 32;
+- vst4q_f16 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.16\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_f32.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4x4_t value)
++foo (float32_t *addr, float32x4x4_t value)
+ {
+- vst4q_f32 (addr, value);
++ return vst4q_f32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+
++/*
++**foo1:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4x4_t value)
++foo1 (float32_t *addr, float32x4x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+-
+-void
+-foo2 (float32_t * addr, float32x4x4_t value)
+-{
+- vst4q_f32 (addr, value);
+- addr += 16;
+- vst4q_f32 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.32\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s16.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (int16_t * addr, int16x8x4_t value)
++foo (int16_t *addr, int16x8x4_t value)
+ {
+- vst4q_s16 (addr, value);
++ return vst4q_s16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+
++/*
++**foo1:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8x4_t value)
++foo1 (int16_t *addr, int16x8x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+-
+-void
+-foo2 (int16_t * addr, int16x8x4_t value)
+-{
+- vst4q_s16 (addr, value);
+- addr += 32;
+- vst4q_s16 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.16\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s32.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4x4_t value)
++foo (int32_t *addr, int32x4x4_t value)
+ {
+- vst4q_s32 (addr, value);
++ return vst4q_s32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+
++/*
++**foo1:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4x4_t value)
++foo1 (int32_t *addr, int32x4x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+-
+-void
+-foo2 (int32_t * addr, int32x4x4_t value)
+-{
+- vst4q_s32 (addr, value);
+- addr += 16;
+- vst4q_s32 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.32\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_s8.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (int8_t * addr, int8x16x4_t value)
++foo (int8_t *addr, int8x16x4_t value)
+ {
+- vst4q_s8 (addr, value);
++ return vst4q_s8 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.8" } } */
+-/* { dg-final { scan-assembler "vst41.8" } } */
+-/* { dg-final { scan-assembler "vst42.8" } } */
+-/* { dg-final { scan-assembler "vst43.8" } } */
+
++/*
++**foo1:
++** ...
++** vst40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16x4_t value)
++foo1 (int8_t *addr, int8x16x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.8" } } */
+-/* { dg-final { scan-assembler "vst41.8" } } */
+-/* { dg-final { scan-assembler "vst42.8" } } */
+-/* { dg-final { scan-assembler "vst43.8" } } */
+-
+-void
+-foo2 (int8_t * addr, int8x16x4_t value)
+-{
+- vst4q_s8 (addr, value);
+- addr += 16*4;
+- vst4q_s8 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.8\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u16.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint16x8x4_t value)
++foo (uint16_t *addr, uint16x8x4_t value)
+ {
+- vst4q_u16 (addr, value);
++ return vst4q_u16 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+
++/*
++**foo1:
++** ...
++** vst40.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.16 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8x4_t value)
++foo1 (uint16_t *addr, uint16x8x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.16" } } */
+-/* { dg-final { scan-assembler "vst41.16" } } */
+-/* { dg-final { scan-assembler "vst42.16" } } */
+-/* { dg-final { scan-assembler "vst43.16" } } */
+-
+-void
+-foo2 (uint16_t * addr, uint16x8x4_t value)
+-{
+- vst4q_u16 (addr, value);
+- addr += 32;
+- vst4q_u16 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.16\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u32.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4x4_t value)
++foo (uint32_t *addr, uint32x4x4_t value)
+ {
+- vst4q_u32 (addr, value);
++ return vst4q_u32 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+
++/*
++**foo1:
++** ...
++** vst40.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.32 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4x4_t value)
++foo1 (uint32_t *addr, uint32x4x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.32" } } */
+-/* { dg-final { scan-assembler "vst41.32" } } */
+-/* { dg-final { scan-assembler "vst42.32" } } */
+-/* { dg-final { scan-assembler "vst43.32" } } */
+-
+-void
+-foo2 (uint32_t * addr, uint32x4x4_t value)
+-{
+- vst4q_u32 (addr, value);
+- addr += 16;
+- vst4q_u32 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.32\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vst4q_u8.c
+@@ -1,37 +1,47 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vst40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint8x16x4_t value)
++foo (uint8_t *addr, uint8x16x4_t value)
+ {
+- vst4q_u8 (addr, value);
++ return vst4q_u8 (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.8" } } */
+-/* { dg-final { scan-assembler "vst41.8" } } */
+-/* { dg-final { scan-assembler "vst42.8" } } */
+-/* { dg-final { scan-assembler "vst43.8" } } */
+
++/*
++**foo1:
++** ...
++** vst40.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst41.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst42.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** vst43.8 {q[0-9+], q[0-9+], q[0-9+], q[0-9+]}, \[r[0-9+]\]
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16x4_t value)
++foo1 (uint8_t *addr, uint8x16x4_t value)
+ {
+- vst4q (addr, value);
++ return vst4q (addr, value);
+ }
+
+-/* { dg-final { scan-assembler "vst40.8" } } */
+-/* { dg-final { scan-assembler "vst41.8" } } */
+-/* { dg-final { scan-assembler "vst42.8" } } */
+-/* { dg-final { scan-assembler "vst43.8" } } */
+-
+-void
+-foo2 (uint8_t * addr, uint8x16x4_t value)
+-{
+- vst4q_u8 (addr, value);
+- addr += 16*4;
+- vst4q_u8 (addr, value);
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler {vst43.8\s\{.*\}, \[.*\]!} } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int16x8_t value, mve_pred16_t p)
++foo (int8_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_p_s16 (addr, value, p);
++ return vstrbq_p_s16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int16x8_t value, mve_pred16_t p)
++foo1 (int8_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int32x4_t value, mve_pred16_t p)
++foo (int8_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_p_s32 (addr, value, p);
++ return vstrbq_p_s32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int32x4_t value, mve_pred16_t p)
++foo1 (int8_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int8x16_t value, mve_pred16_t p)
++foo (int8_t *base, int8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_p_s8 (addr, value, p);
++ return vstrbq_p_s8 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16_t value, mve_pred16_t p)
++foo1 (int8_t *base, int8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint16x8_t value, mve_pred16_t p)
++foo (uint8_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_p_u16 (addr, value, p);
++ return vstrbq_p_u16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint16x8_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint32x4_t value, mve_pred16_t p)
++foo (uint8_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_p_u32 (addr, value, p);
++ return vstrbq_p_u32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint32x4_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_p_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint8x16_t value, mve_pred16_t p)
++foo (uint8_t *base, uint8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_p_u8 (addr, value, p);
++ return vstrbq_p_u8 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_p (addr, value, p);
++ return vstrbq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int16x8_t value)
++foo (int8_t *base, int16x8_t value)
+ {
+- vstrbq_s16 (addr, value);
++ return vstrbq_s16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int16x8_t value)
++foo1 (int8_t *base, int16x8_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int32x4_t value)
++foo (int8_t *base, int32x4_t value)
+ {
+- vstrbq_s32 (addr, value);
++ return vstrbq_s32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int32x4_t value)
++foo1 (int8_t *base, int32x4_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * addr, int8x16_t value)
++foo (int8_t *base, int8x16_t value)
+ {
+- vstrbq_s8 (addr, value);
++ return vstrbq_s8 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * addr, int8x16_t value)
++foo1 (int8_t *base, int8x16_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo (int8_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_s16 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_s16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo1 (int8_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo (int8_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_s32 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_s32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo1 (int8_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_s8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint8x16_t offset, int8x16_t value, mve_pred16_t p)
++foo (int8_t *base, uint8x16_t offset, int8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_s8 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_s8 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint8x16_t offset, int8x16_t value, mve_pred16_t p)
++foo1 (int8_t *base, uint8x16_t offset, int8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo (uint8_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_u16 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_u16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo (uint8_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_u32 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_u32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_p_u8.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint8x16_t offset, uint8x16_t value, mve_pred16_t p)
++foo (uint8_t *base, uint8x16_t offset, uint8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p_u8 (base, offset, value, p);
++ return vstrbq_scatter_offset_p_u8 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint8x16_t offset, uint8x16_t value, mve_pred16_t p)
++foo1 (uint8_t *base, uint8x16_t offset, uint8x16_t value, mve_pred16_t p)
+ {
+- vstrbq_scatter_offset_p (base, offset, value, p);
++ return vstrbq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrbt.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint16x8_t offset, int16x8_t value)
++foo (int8_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrbq_scatter_offset_s16 (base, offset, value);
++ return vstrbq_scatter_offset_s16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint16x8_t offset, int16x8_t value)
++foo1 (int8_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint32x4_t offset, int32x4_t value)
++foo (int8_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrbq_scatter_offset_s32 (base, offset, value);
++ return vstrbq_scatter_offset_s32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint32x4_t offset, int32x4_t value)
++foo1 (int8_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int8_t * base, uint8x16_t offset, int8x16_t value)
++foo (int8_t *base, uint8x16_t offset, int8x16_t value)
+ {
+- vstrbq_scatter_offset_s8 (base, offset, value);
++ return vstrbq_scatter_offset_s8 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int8_t * base, uint8x16_t offset, int8x16_t value)
++foo1 (int8_t *base, uint8x16_t offset, int8x16_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint16x8_t offset, uint16x8_t value)
++foo (uint8_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrbq_scatter_offset_u16 (base, offset, value);
++ return vstrbq_scatter_offset_u16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint16x8_t offset, uint16x8_t value)
++foo1 (uint8_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint32x4_t offset, uint32x4_t value)
++foo (uint8_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrbq_scatter_offset_u32 (base, offset, value);
++ return vstrbq_scatter_offset_u32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint32x4_t offset, uint32x4_t value)
++foo1 (uint8_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_scatter_offset_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * base, uint8x16_t offset, uint8x16_t value)
++foo (uint8_t *base, uint8x16_t offset, uint8x16_t value)
+ {
+- vstrbq_scatter_offset_u8 (base, offset, value);
++ return vstrbq_scatter_offset_u8 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * base, uint8x16_t offset, uint8x16_t value)
++foo1 (uint8_t *base, uint8x16_t offset, uint8x16_t value)
+ {
+- vstrbq_scatter_offset (base, offset, value);
++ return vstrbq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint16x8_t value)
++foo (uint8_t *base, uint16x8_t value)
+ {
+- vstrbq_u16 (addr, value);
++ return vstrbq_u16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint16x8_t value)
++foo1 (uint8_t *base, uint16x8_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint32x4_t value)
++foo (uint8_t *base, uint32x4_t value)
+ {
+- vstrbq_u32 (addr, value);
++ return vstrbq_u32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint32x4_t value)
++foo1 (uint8_t *base, uint32x4_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrbq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint8_t * addr, uint8x16_t value)
++foo (uint8_t *base, uint8x16_t value)
+ {
+- vstrbq_u8 (addr, value);
++ return vstrbq_u8 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
+
++/*
++**foo1:
++** ...
++** vstrb.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint8_t * addr, uint8x16_t value)
++foo1 (uint8_t *base, uint8x16_t value)
+ {
+- vstrbq (addr, value);
++ return vstrbq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrb.8" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t addr, const int offset, int64x2_t value, mve_pred16_t p)
++foo (uint64x2_t addr, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_p_s64 (addr, 8, value, p);
++ return vstrdq_scatter_base_p_s64 (addr, 0, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t addr, const int offset, int64x2_t value, mve_pred16_t p)
++foo1 (uint64x2_t addr, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_p (addr, 8, value, p);
++ return vstrdq_scatter_base_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t addr, const int offset, uint64x2_t value, mve_pred16_t p)
++foo (uint64x2_t addr, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_p_u64 (addr, 8, value, p);
++ return vstrdq_scatter_base_p_u64 (addr, 0, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.u64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t addr, const int offset, uint64x2_t value, mve_pred16_t p)
++foo1 (uint64x2_t addr, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_p (addr, 8, value, p);
++ return vstrdq_scatter_base_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t addr, const int offset, int64x2_t value)
++foo (uint64x2_t addr, int64x2_t value)
+ {
+- vstrdq_scatter_base_s64 (addr, 1016, value);
++ return vstrdq_scatter_base_s64 (addr, 0, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t addr, const int offset, int64x2_t value)
++foo1 (uint64x2_t addr, int64x2_t value)
+ {
+- vstrdq_scatter_base (addr, 1016, value);
++ return vstrdq_scatter_base (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t addr, const int offset, uint64x2_t value)
++foo (uint64x2_t addr, uint64x2_t value)
+ {
+- vstrdq_scatter_base_u64 (addr, 8, value);
++ return vstrdq_scatter_base_u64 (addr, 0, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.u64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t addr, const int offset, uint64x2_t value)
++foo1 (uint64x2_t addr, uint64x2_t value)
+ {
+- vstrdq_scatter_base (addr, 8, value);
++ return vstrdq_scatter_base (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.u64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_s64.c
+@@ -1,19 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t * addr, const int offset, int64x2_t value, mve_pred16_t p)
++foo (uint64x2_t *addr, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_wb_p_s64 (addr, 8, value, p);
++ return vstrdq_scatter_base_wb_p_s64 (addr, 0, value, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t * addr, const int offset, int64x2_t value, mve_pred16_t p)
++foo1 (uint64x2_t *addr, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_wb_p (addr, 8, value, p);
++ return vstrdq_scatter_base_wb_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrdt.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_p_u64.c
+@@ -1,19 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t * addr, const int offset, uint64x2_t value, mve_pred16_t p)
++foo (uint64x2_t *addr, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_wb_p_u64 (addr, 8, value, p);
++ return vstrdq_scatter_base_wb_p_u64 (addr, 0, value, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t * addr, const int offset, uint64x2_t value, mve_pred16_t p)
++foo1 (uint64x2_t *addr, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_base_wb_p (addr, 8, value, p);
++ return vstrdq_scatter_base_wb_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrdt.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_s64.c
+@@ -1,19 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t * addr, const int offset, int64x2_t value)
++foo (uint64x2_t *addr, int64x2_t value)
+ {
+- vstrdq_scatter_base_wb_s64 (addr, 8, value);
++ return vstrdq_scatter_base_wb_s64 (addr, 0, value);
+ }
+
++
++/*
++**foo1:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t * addr, const int offset, int64x2_t value)
++foo1 (uint64x2_t *addr, int64x2_t value)
+ {
+- vstrdq_scatter_base_wb (addr, 8, value);
++ return vstrdq_scatter_base_wb (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrd.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_wb_u64.c
+@@ -1,19 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint64x2_t * addr, const int offset, uint64x2_t value)
++foo (uint64x2_t *addr, uint64x2_t value)
+ {
+- vstrdq_scatter_base_wb_u64 (addr, 8, value);
++ return vstrdq_scatter_base_wb_u64 (addr, 0, value);
+ }
+
++
++/*
++**foo1:
++** ...
++** vstrd.u64 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64x2_t * addr, const int offset, uint64x2_t value)
++foo1 (uint64x2_t *addr, uint64x2_t value)
+ {
+- vstrdq_scatter_base_wb (addr, 8, value);
++ return vstrdq_scatter_base_wb (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrd.u64\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
++foo (int64_t *base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_offset_p_s64 (base, offset, value, p);
++ return vstrdq_scatter_offset_p_s64 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
++foo1 (int64_t *base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_offset_p (base, offset, value, p);
++ return vstrdq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
++foo (uint64_t *base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_offset_p_u64 (base, offset, value, p);
++ return vstrdq_scatter_offset_p_u64 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
++foo1 (uint64_t *base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_offset_p (base, offset, value, p);
++ return vstrdq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int64_t * base, uint64x2_t offset, int64x2_t value)
++foo (int64_t *base, uint64x2_t offset, int64x2_t value)
+ {
+- vstrdq_scatter_offset_s64 (base, offset, value);
++ return vstrdq_scatter_offset_s64 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int64_t * base, uint64x2_t offset, int64x2_t value)
++foo1 (int64_t *base, uint64x2_t offset, int64x2_t value)
+ {
+- vstrdq_scatter_offset (base, offset, value);
++ return vstrdq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64_t * base, uint64x2_t offset, uint64x2_t value)
++foo (uint64_t *base, uint64x2_t offset, uint64x2_t value)
+ {
+- vstrdq_scatter_offset_u64 (base, offset, value);
++ return vstrdq_scatter_offset_u64 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value)
++foo1 (uint64_t *base, uint64x2_t offset, uint64x2_t value)
+ {
+- vstrdq_scatter_offset (base, offset, value);
++ return vstrdq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
++foo (int64_t *base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_shifted_offset_p_s64 (base, offset, value, p);
++ return vstrdq_scatter_shifted_offset_p_s64 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
++foo1 (int64_t *base, uint64x2_t offset, int64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrdq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
++foo (uint64_t *base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_shifted_offset_p_u64 (base, offset, value, p);
++ return vstrdq_scatter_shifted_offset_p_u64 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrdt.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
++foo1 (uint64_t *base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p)
+ {
+- vstrdq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrdq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrdt.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo (int64_t * base, uint64x2_t offset, int64x2_t value)
++foo (int64_t *base, uint64x2_t offset, int64x2_t value)
+ {
+- vstrdq_scatter_shifted_offset_s64 (base, offset, value);
++ return vstrdq_scatter_shifted_offset_s64 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int64_t * base, uint64x2_t offset, int64x2_t value)
++foo1 (int64_t *base, uint64x2_t offset, int64x2_t value)
+ {
+- vstrdq_scatter_shifted_offset (base, offset, value);
++ return vstrdq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint64_t * base, uint64x2_t offset, uint64x2_t value)
++foo (uint64_t *base, uint64x2_t offset, uint64x2_t value)
+ {
+- vstrdq_scatter_shifted_offset_u64 (base, offset, value);
++ return vstrdq_scatter_shifted_offset_u64 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
+
++/*
++**foo1:
++** ...
++** vstrd.64 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #3\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value)
++foo1 (uint64_t *base, uint64x2_t offset, uint64x2_t value)
+ {
+- vstrdq_scatter_shifted_offset (base, offset, value);
++ return vstrdq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrd.64" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * addr, float16x8_t value)
++foo (float16_t *base, float16x8_t value)
+ {
+- vstrhq_f16 (addr, value);
++ return vstrhq_f16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8_t value)
++foo1 (float16_t *base, float16x8_t value)
+ {
+- vstrhq (addr, value);
++ return vstrhq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * addr, float16x8_t value, mve_pred16_t p)
++foo (float16_t *base, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p_f16 (addr, value, p);
++ return vstrhq_p_f16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * addr, float16x8_t value, mve_pred16_t p)
++foo1 (float16_t *base, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p (addr, value, p);
++ return vstrhq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int16x8_t value, mve_pred16_t p)
++foo (int16_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p_s16 (addr, value, p);
++ return vstrhq_p_s16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8_t value, mve_pred16_t p)
++foo1 (int16_t *base, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p (addr, value, p);
++ return vstrhq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int32x4_t value, mve_pred16_t p)
++foo (int16_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_p_s32 (addr, value, p);
++ return vstrhq_p_s32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int32x4_t value, mve_pred16_t p)
++foo1 (int16_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_p (addr, value, p);
++ return vstrhq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
++foo (uint16_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p_u16 (addr, value, p);
++ return vstrhq_p_u16 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_p (addr, value, p);
++ return vstrhq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint32x4_t value, mve_pred16_t p)
++foo (uint16_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_p_u32 (addr, value, p);
++ return vstrhq_p_u32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint32x4_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_p (addr, value, p);
++ return vstrhq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int16x8_t value)
++foo (int16_t *base, int16x8_t value)
+ {
+- vstrhq_s16 (addr, value);
++ return vstrhq_s16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int16x8_t value)
++foo1 (int16_t *base, int16x8_t value)
+ {
+- vstrhq (addr, value);
++ return vstrhq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * addr, int32x4_t value)
++foo (int16_t *base, int32x4_t value)
+ {
+- vstrhq_s32 (addr, value);
++ return vstrhq_s32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * addr, int32x4_t value)
++foo1 (int16_t *base, int32x4_t value)
+ {
+- vstrhq (addr, value);
++ return vstrhq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * base, uint16x8_t offset, float16x8_t value)
++foo (float16_t *base, uint16x8_t offset, float16x8_t value)
+ {
+- vstrhq_scatter_offset_f16 (base, offset, value);
++ return vstrhq_scatter_offset_f16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * base, uint16x8_t offset, float16x8_t value)
++foo1 (float16_t *base, uint16x8_t offset, float16x8_t value)
+ {
+- vstrhq_scatter_offset (base, offset, value);
++ return vstrhq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
++foo (float16_t *base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p_f16 (base, offset, value, p);
++ return vstrhq_scatter_offset_p_f16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
++foo1 (float16_t *base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p (base, offset, value, p);
++ return vstrhq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo (int16_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p_s16 (base, offset, value, p);
++ return vstrhq_scatter_offset_p_s16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo1 (int16_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p (base, offset, value, p);
++ return vstrhq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo (int16_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p_s32 (base, offset, value, p);
++ return vstrhq_scatter_offset_p_s32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo1 (int16_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p (base, offset, value, p);
++ return vstrhq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo (uint16_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p_u16 (base, offset, value, p);
++ return vstrhq_scatter_offset_p_u16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p (base, offset, value, p);
++ return vstrhq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo (uint16_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p_u32 (base, offset, value, p);
++ return vstrhq_scatter_offset_p_u32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_offset_p (base, offset, value, p);
++ return vstrhq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint16x8_t offset, int16x8_t value)
++foo (int16_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrhq_scatter_offset_s16 (base, offset, value);
++ return vstrhq_scatter_offset_s16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint16x8_t offset, int16x8_t value)
++foo1 (int16_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrhq_scatter_offset (base, offset, value);
++ return vstrhq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint32x4_t offset, int32x4_t value)
++foo (int16_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrhq_scatter_offset_s32 (base, offset, value);
++ return vstrhq_scatter_offset_s32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint32x4_t offset, int32x4_t value)
++foo1 (int16_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrhq_scatter_offset (base, offset, value);
++ return vstrhq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint16x8_t offset, uint16x8_t value)
++foo (uint16_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrhq_scatter_offset_u16 (base, offset, value);
++ return vstrhq_scatter_offset_u16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value)
++foo1 (uint16_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrhq_scatter_offset (base, offset, value);
++ return vstrhq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint32x4_t offset, uint32x4_t value)
++foo (uint16_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrhq_scatter_offset_u32 (base, offset, value);
++ return vstrhq_scatter_offset_u32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value)
++foo1 (uint16_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrhq_scatter_offset (base, offset, value);
++ return vstrhq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * base, uint16x8_t offset, float16x8_t value)
++foo (float16_t *base, uint16x8_t offset, float16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset_f16 (base, offset, value);
++ return vstrhq_scatter_shifted_offset_f16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * base, uint16x8_t offset, float16x8_t value)
++foo1 (float16_t *base, uint16x8_t offset, float16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset (base, offset, value);
++ return vstrhq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
++foo (float16_t *base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p_f16 (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p_f16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
++foo1 (float16_t *base, uint16x8_t offset, float16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo (int16_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p_s16 (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p_s16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
++foo1 (int16_t *base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo (int16_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p_s32 (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p_s32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo1 (int16_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u16.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo (uint16_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p_u16 (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p_u16 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo (uint16_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p_u32 (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p_u32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint16_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrhq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrht.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint16x8_t offset, int16x8_t value)
++foo (int16_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset_s16 (base, offset, value);
++ return vstrhq_scatter_shifted_offset_s16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint16x8_t offset, int16x8_t value)
++foo1 (int16_t *base, uint16x8_t offset, int16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset (base, offset, value);
++ return vstrhq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (int16_t * base, uint32x4_t offset, int32x4_t value)
++foo (int16_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrhq_scatter_shifted_offset_s32 (base, offset, value);
++ return vstrhq_scatter_shifted_offset_s32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int16_t * base, uint32x4_t offset, int32x4_t value)
++foo1 (int16_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrhq_scatter_shifted_offset (base, offset, value);
++ return vstrhq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint16x8_t offset, uint16x8_t value)
++foo (uint16_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset_u16 (base, offset, value);
++ return vstrhq_scatter_shifted_offset_u16 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value)
++foo1 (uint16_t *base, uint16x8_t offset, uint16x8_t value)
+ {
+- vstrhq_scatter_shifted_offset (base, offset, value);
++ return vstrhq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * base, uint32x4_t offset, uint32x4_t value)
++foo (uint16_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrhq_scatter_shifted_offset_u32 (base, offset, value);
++ return vstrhq_scatter_shifted_offset_u32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #1\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value)
++foo1 (uint16_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrhq_scatter_shifted_offset (base, offset, value);
++ return vstrhq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint16x8_t value)
++foo (uint16_t *base, uint16x8_t value)
+ {
+- vstrhq_u16 (addr, value);
++ return vstrhq_u16 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint16x8_t value)
++foo1 (uint16_t *base, uint16x8_t value)
+ {
+- vstrhq (addr, value);
++ return vstrhq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.16" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint16_t * addr, uint32x4_t value)
++foo (uint16_t *base, uint32x4_t value)
+ {
+- vstrhq_u32 (addr, value);
++ return vstrhq_u32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrh.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint16_t * addr, uint32x4_t value)
++foo1 (uint16_t *base, uint32x4_t value)
+ {
+- vstrhq (addr, value);
++ return vstrhq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrh.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4_t value)
++foo (float32_t *base, float32x4_t value)
+ {
+- vstrwq_f32 (addr, value);
++ return vstrwq_f32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4_t value)
++foo1 (float32_t *base, float32x4_t value)
+ {
+- vstrwq (addr, value);
++ return vstrwq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * addr, float32x4_t value, mve_pred16_t p)
++foo (float32_t *base, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p_f32 (addr, value, p);
++ return vstrwq_p_f32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * addr, float32x4_t value, mve_pred16_t p)
++foo1 (float32_t *base, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p (addr, value, p);
++ return vstrwq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4_t value, mve_pred16_t p)
++foo (int32_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p_s32 (addr, value, p);
++ return vstrwq_p_s32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4_t value, mve_pred16_t p)
++foo1 (int32_t *base, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p (addr, value, p);
++ return vstrwq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
++foo (uint32_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p_u32 (addr, value, p);
++ return vstrwq_p_u32 (base, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
++foo1 (uint32_t *base, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_p (addr, value, p);
++ return vstrwq_p (base, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * addr, int32x4_t value)
++foo (int32_t *base, int32x4_t value)
+ {
+- vstrwq_s32 (addr, value);
++ return vstrwq_s32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * addr, int32x4_t value)
++foo1 (int32_t *base, int32x4_t value)
+ {
+- vstrwq (addr, value);
++ return vstrwq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, float32x4_t value)
+ {
+- vstrwq_scatter_base_f32 (addr, 8, value);
++ return vstrwq_scatter_base_f32 (addr, 0, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, float32x4_t value)
+ {
+- vstrwq_scatter_base (addr, 8, value);
++ return vstrwq_scatter_base (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p_f32 (addr, 8, value, p);
++ return vstrwq_scatter_base_p_f32 (addr, 0, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p (addr, 8, value, p);
++ return vstrwq_scatter_base_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p_s32 (addr, 8, value, p);
++ return vstrwq_scatter_base_p_s32 (addr, 0, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p (addr, 8, value, p);
++ return vstrwq_scatter_base_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p_u32 (addr, 8, value, p);
++ return vstrwq_scatter_base_p_u32 (addr, 0, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_p (addr, 8, value, p);
++ return vstrwq_scatter_base_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, int32x4_t value)
+ {
+- vstrwq_scatter_base_s32 (addr, 8, value);
++ return vstrwq_scatter_base_s32 (addr, 0, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, int32x4_t value)
+ {
+- vstrwq_scatter_base (addr, 8, value);
++ return vstrwq_scatter_base (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo (uint32x4_t addr, uint32x4_t value)
+ {
+- vstrwq_scatter_base_u32 (addr, 8, value);
++ return vstrwq_scatter_base_u32 (addr, 0, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+ foo1 (uint32x4_t addr, uint32x4_t value)
+ {
+- vstrwq_scatter_base (addr, 8, value);
++ return vstrwq_scatter_base (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.u32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_f32.c
+@@ -1,19 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, const int offset, float32x4_t value)
++foo (uint32x4_t *addr, float32x4_t value)
+ {
+- vstrwq_scatter_base_wb_f32 (addr, 8, value);
++ return vstrwq_scatter_base_wb_f32 (addr, 0, value);
+ }
+
++
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, const int offset, float32x4_t value)
++foo1 (uint32x4_t *addr, float32x4_t value)
+ {
+- vstrwq_scatter_base_wb (addr, 8, value);
++ return vstrwq_scatter_base_wb (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_f32.c
+@@ -1,19 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, const int offset, float32x4_t value, mve_pred16_t p)
++foo (uint32x4_t *addr, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p_f32 (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p_f32 (addr, 0, value, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, const int offset, float32x4_t value, mve_pred16_t p)
++foo1 (uint32x4_t *addr, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_s32.c
+@@ -1,19 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, const int offset, int32x4_t value, mve_pred16_t p)
++foo (uint32x4_t *addr, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p_s32 (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p_s32 (addr, 0, value, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, const int offset, int32x4_t value, mve_pred16_t p)
++foo1 (uint32x4_t *addr, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_p_u32.c
+@@ -1,19 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, const int offset, uint32x4_t value, mve_pred16_t p)
++foo (uint32x4_t *addr, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p_u32 (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p_u32 (addr, 0, value, p);
+ }
+
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, const int offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint32x4_t *addr, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_base_wb_p (addr, 8, value, p);
++ return vstrwq_scatter_base_wb_p (addr, 0, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrwt.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_s32.c
+@@ -1,19 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, const int offset, int32x4_t value)
++foo (uint32x4_t *addr, int32x4_t value)
+ {
+- vstrwq_scatter_base_wb_s32 (addr, 8, value);
++ return vstrwq_scatter_base_wb_s32 (addr, 0, value);
+ }
+
++
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, const int offset, int32x4_t value)
++foo1 (uint32x4_t *addr, int32x4_t value)
+ {
+- vstrwq_scatter_base_wb (addr, 8, value);
++ return vstrwq_scatter_base_wb (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_wb_u32.c
+@@ -1,19 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo (uint32x4_t * addr, uint32x4_t value)
++foo (uint32x4_t *addr, uint32x4_t value)
+ {
+- vstrwq_scatter_base_wb_u32 (addr, 8, value);
++ return vstrwq_scatter_base_wb_u32 (addr, 0, value);
+ }
+
++
++/*
++**foo1:
++** ...
++** vstrw.u32 q[0-9]+, \[q[0-9]+, #[0-9]+\]!(?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32x4_t * addr, uint32x4_t value)
++foo1 (uint32x4_t *addr, uint32x4_t value)
+ {
+- vstrwq_scatter_base_wb (addr, 8, value);
++ return vstrwq_scatter_base_wb (addr, 0, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler-times "vstrw.u32\tq\[0-9\]+, \\\[q\[0-9\]+, #\[0-9\]+\\\]!" 2 } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * base, uint32x4_t offset, float32x4_t value)
++foo (float32_t *base, uint32x4_t offset, float32x4_t value)
+ {
+- vstrwq_scatter_offset_f32 (base, offset, value);
++ return vstrwq_scatter_offset_f32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * base, uint32x4_t offset, float32x4_t value)
++foo1 (float32_t *base, uint32x4_t offset, float32x4_t value)
+ {
+- vstrwq_scatter_offset (base, offset, value);
++ return vstrwq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
++foo (float32_t *base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p_f32 (base, offset, value, p);
++ return vstrwq_scatter_offset_p_f32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
++foo1 (float32_t *base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p (base, offset, value, p);
++ return vstrwq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo (int32_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p_s32 (base, offset, value, p);
++ return vstrwq_scatter_offset_p_s32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo1 (int32_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p (base, offset, value, p);
++ return vstrwq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo (uint32_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p_u32 (base, offset, value, p);
++ return vstrwq_scatter_offset_p_u32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint32_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_offset_p (base, offset, value, p);
++ return vstrwq_scatter_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * base, uint32x4_t offset, int32x4_t value)
++foo (int32_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrwq_scatter_offset_s32 (base, offset, value);
++ return vstrwq_scatter_offset_s32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * base, uint32x4_t offset, int32x4_t value)
++foo1 (int32_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrwq_scatter_offset (base, offset, value);
++ return vstrwq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * base, uint32x4_t offset, uint32x4_t value)
++foo (uint32_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrwq_scatter_offset_u32 (base, offset, value);
++ return vstrwq_scatter_offset_u32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value)
++foo1 (uint32_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrwq_scatter_offset (base, offset, value);
++ return vstrwq_scatter_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * base, uint32x4_t offset, float32x4_t value)
++foo (float32_t *base, uint32x4_t offset, float32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset_f32 (base, offset, value);
++ return vstrwq_scatter_shifted_offset_f32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * base, uint32x4_t offset, float32x4_t value)
++foo1 (float32_t *base, uint32x4_t offset, float32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset (base, offset, value);
++ return vstrwq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
++foo (float32_t *base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p_f32 (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p_f32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
++foo1 (float32_t *base, uint32x4_t offset, float32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo (int32_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p_s32 (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p_s32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
++foo1 (int32_t *base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c
+@@ -1,21 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo (uint32_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p_u32 (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p_u32 (base, offset, value, p);
+ }
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
++foo1 (uint32_t *base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+ {
+- vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++ return vstrwq_scatter_shifted_offset_p (base, offset, value, p);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrwt.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (int32_t * base, uint32x4_t offset, int32x4_t value)
++foo (int32_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset_s32 (base, offset, value);
++ return vstrwq_scatter_shifted_offset_s32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (int32_t * base, uint32x4_t offset, int32x4_t value)
++foo1 (int32_t *base, uint32x4_t offset, int32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset (base, offset, value);
++ return vstrwq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * base, uint32x4_t offset, uint32x4_t value)
++foo (uint32_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset_u32 (base, offset, value);
++ return vstrwq_scatter_shifted_offset_u32 (base, offset, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+), q[0-9]+, uxtw #2\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value)
++foo1 (uint32_t *base, uint32x4_t offset, uint32x4_t value)
+ {
+- vstrwq_scatter_shifted_offset (base, offset, value);
++ return vstrwq_scatter_shifted_offset (base, offset, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo (uint32_t * addr, uint32x4_t value)
++foo (uint32_t *base, uint32x4_t value)
+ {
+- vstrwq_u32 (addr, value);
++ return vstrwq_u32 (base, value);
+ }
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
+
++/*
++**foo1:
++** ...
++** vstrw.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\](?: @.*|)
++** ...
++*/
+ void
+-foo1 (uint32_t * addr, uint32x4_t value)
++foo1 (uint32_t *base, uint32x4_t value)
+ {
+- vstrwq (addr, value);
++ return vstrwq (base, value);
++}
++
++#ifdef __cplusplus
+ }
++#endif
+
+-/* { dg-final { scan-assembler "vstrw.32" } } */
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_f16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b)
+ {
+ return vsubq_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16x8_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_f32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b)
+ {
+ return vsubq_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32x4_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_f16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_f32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vsubq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_f16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t inactive, float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t inactive, float16x8_t a, mve_pred16_t p)
++{
++ return vsubq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vsubq_m (inactive, a, 23.23, p);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_f32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_f32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t inactive, float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t inactive, float32x4_t a, mve_pred16_t p)
++{
++ return vsubq_m (inactive, a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s16.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s32.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_s8.c
+@@ -1,23 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u16.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
++{
++ return vsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u32.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
++{
++ return vsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_n_u8.c
+@@ -1,23 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vsubq_m_n_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
++{
++ return vsubq_m (inactive, a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m_s16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m_s32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_s8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vsubq_m_s8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u16.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m_u16 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u32.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m_u32 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_m_u8.c
+@@ -1,22 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vsubq_m_u8 (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+ return vsubq_m (inactive, a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f16-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo1 (float16x8_t a, float16_t b)
+-{
+- return vsubq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f16.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b)
+ {
+ return vsubq_n_f16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo1 (float16x8_t a, float16_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f16" } } */
++/*
++**foo2:
++** ...
++** vsub.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a)
++{
++ return vsubq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f32-1.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo1 (float32x4_t a, float32_t b)
+-{
+- return vsubq (a, 23.23);
+-}
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_f32.c
+@@ -1,21 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b)
+ {
+ return vsubq_n_f32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo1 (float32x4_t a, float32_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.f32" } } */
++/*
++**foo2:
++** ...
++** vsub.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a)
++{
++ return vsubq (a, 1.1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b)
+ {
+ return vsubq_n_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b)
+ {
+ return vsubq_n_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c
+@@ -1,22 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b)
+ {
+ return vsubq_n_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b)
+ {
+ return vsubq_n_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
++/*
++**foo2:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a)
++{
++ return vsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b)
+ {
+ return vsubq_n_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
++/*
++**foo2:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a)
++{
++ return vsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c
+@@ -1,22 +1,53 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
+-/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b)
+ {
+ return vsubq_n_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
++/*
++**foo2:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a)
++{
++ return vsubq (a, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b)
+ {
+ return vsubq_s16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo1 (int16x8_t a, int16x8_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b)
+ {
+ return vsubq_s32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo1 (int32x4_t a, int32x4_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b)
+ {
+ return vsubq_s8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo1 (int8x16_t a, int8x16_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b)
+ {
+ return vsubq_u16 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo1 (uint16x8_t a, uint16x8_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i16" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b)
+ {
+ return vsubq_u32 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo1 (uint32x4_t a, uint32x4_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i32" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c
+@@ -1,21 +1,41 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vsubq_u8 (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
+
++/*
++**foo1:
++** ...
++** vsub.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo1 (uint8x16_t a, uint8x16_t b)
+ {
+ return vsubq (a, b);
+ }
+
+-/* { dg-final { scan-assembler "vsub.i8" } } */
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+ {
+- return vsubq_x_f16 (a, b, p);
++ return vsubq_x_f16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+ {
+- return vsubq_x_f32 (a, b, p);
++ return vsubq_x_f32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16-1.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float16x8_t
+-foo (float16x8_t a, float16_t b, mve_pred16_t p)
+-{
+- return vsubq_x_n_f16 (a, 23.23, p);
+-}
+-
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c
+@@ -1,15 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float16x8_t
+ foo (float16x8_t a, float16_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_f16 (a, b, p);
++ return vsubq_x_n_f16 (a, b, p);
++}
++
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float16x8_t
++foo2 (float16x8_t a, mve_pred16_t p)
++{
++ return vsubq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+deleted file mode 100644
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32-1.c
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+-/* { dg-add-options arm_v8_1m_mve_fp } */
+-/* { dg-additional-options "-O2" } */
+-
+-#include "arm_mve.h"
+-float32x4_t
+-foo (float32x4_t a, float32_t b, mve_pred16_t p)
+-{
+- return vsubq_x_n_f32 (a, 23.23, p);
+-}
+-
+-
+-/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c
+@@ -1,15 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+ /* { dg-add-options arm_v8_1m_mve_fp } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ float32x4_t
+ foo (float32x4_t a, float32_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_f32 (a, b, p);
++ return vsubq_x_n_f32 (a, b, p);
++}
++
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.f32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.f32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++float32x4_t
++foo2 (float32x4_t a, mve_pred16_t p)
++{
++ return vsubq_x (a, 1.1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_s16 (a, b, p);
++ return vsubq_x_n_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_s32 (a, b, p);
++ return vsubq_x_n_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_s8 (a, b, p);
++ return vsubq_x_n_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c
+@@ -1,15 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_u16 (a, b, p);
++ return vsubq_x_n_u16 (a, b, p);
++}
++
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo2 (uint16x8_t a, mve_pred16_t p)
++{
++ return vsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c
+@@ -1,15 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_u32 (a, b, p);
++ return vsubq_x_n_u32 (a, b, p);
++}
++
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo2 (uint32x4_t a, mve_pred16_t p)
++{
++ return vsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c
+@@ -1,15 +1,65 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+ {
+- return vsubq_x_n_u8 (a, b, p);
++ return vsubq_x_n_u8 (a, b, p);
++}
++
++
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
++/*
++**foo2:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo2 (uint8x16_t a, mve_pred16_t p)
++{
++ return vsubq_x (a, 1, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
+
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int16x8_t
+ foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+ {
+- return vsubq_x_s16 (a, b, p);
++ return vsubq_x_s16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int16x8_t
++foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int32x4_t
+ foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+ {
+- return vsubq_x_s32 (a, b, p);
++ return vsubq_x_s32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int32x4_t
++foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ int8x16_t
+ foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+ {
+- return vsubq_x_s8 (a, b, p);
++ return vsubq_x_s8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++int8x16_t
++foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint16x8_t
+ foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+ {
+- return vsubq_x_u16 (a, b, p);
++ return vsubq_x_u16 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i16" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i16 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint16x8_t
++foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint32x4_t
+ foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+ {
+- return vsubq_x_u32 (a, b, p);
++ return vsubq_x_u32 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i32" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i32 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint32x4_t
++foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+--- a/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c
+@@ -1,15 +1,49 @@
+ /* { dg-require-effective-target arm_v8_1m_mve_ok } */
+ /* { dg-add-options arm_v8_1m_mve } */
+ /* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+
+ #include "arm_mve.h"
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+ {
+- return vsubq_x_u8 (a, b, p);
++ return vsubq_x_u8 (a, b, p);
+ }
+
+-/* { dg-final { scan-assembler "vpst" } } */
+-/* { dg-final { scan-assembler "vsubt.i8" } } */
+
++/*
++**foo1:
++** ...
++** vmsr p0, (?:ip|fp|r[0-9]+)(?: @.*|)
++** ...
++** vpst(?: @.*|)
++** ...
++** vsubt.i8 q[0-9]+, q[0-9]+, q[0-9]+(?: @.*|)
++** ...
++*/
++uint8x16_t
++foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
++{
++ return vsubq_x (a, b, p);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+\ No newline at end of file
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/mve_const_shifts.c
+@@ -0,0 +1,41 @@
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-additional-options "-O2" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++#include "arm_mve.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++**foo11:
++** ...
++** movs r0, #2
++** ...
++*/
++uint32_t
++foo11 ()
++{
++ return uqshl (1, 1);
++}
++
++/*
++**foo12:
++** ...
++** movs r0, #2
++** movs r1, #0
++** ...
++*/
++uint64_t
++foo12 ()
++{
++ return uqshll (1, 1);
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+--- a/src/gcc/testsuite/gcc.target/arm/mve/mve_load_memory_modes.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/mve_load_memory_modes.c
+@@ -7,7 +7,7 @@
+ /*
+ **off_load8_0:
+ ** ...
+-** vldrb.8 q0, \[r0, #16\]
++** vldrb.8 q[0-7], \[r0, #16\]
+ ** ...
+ */
+ int8x16_t off_load8_0 (int8_t * a)
+@@ -18,7 +18,7 @@ int8x16_t off_load8_0 (int8_t * a)
+ /*
+ **off_load8_1:
+ ** ...
+-** vldrb.u16 q0, \[r0, #1\]
++** vldrb.u16 q[0-7], \[r0, #1\]
+ ** ...
+ */
+ uint16x8_t off_load8_1 (uint8_t * a)
+@@ -29,7 +29,7 @@ uint16x8_t off_load8_1 (uint8_t * a)
+ /*
+ **off_load8_2:
+ ** ...
+-** vldrb.s32 q0, \[r0, #127\]
++** vldrb.s32 q[0-7], \[r0, #127\]
+ ** ...
+ */
+ int32x4_t off_load8_2 (int8_t * a)
+@@ -40,7 +40,7 @@ int32x4_t off_load8_2 (int8_t * a)
+ /*
+ **off_load8_3:
+ ** ...
+-** vldrb.8 q0, \[r0, #-127\]
++** vldrb.8 q[0-7], \[r0, #-127\]
+ ** ...
+ */
+ uint8x16_t off_load8_3 (uint8_t * a)
+@@ -51,7 +51,7 @@ uint8x16_t off_load8_3 (uint8_t * a)
+ /*
+ **not_off_load8_0:
+ ** ...
+-** vldrb.8 q0, \[r[0-9]+\]
++** vldrb.8 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ int8x16_t not_off_load8_0 (int8_t * a)
+@@ -62,7 +62,7 @@ int8x16_t not_off_load8_0 (int8_t * a)
+ /*
+ **off_loadfp16_0:
+ ** ...
+-** vldrh.16 q0, \[r0, #-244\]
++** vldrh.16 q[0-7], \[r0, #-244\]
+ ** ...
+ */
+ float16x8_t off_loadfp16_0 (float16_t *a)
+@@ -73,7 +73,7 @@ float16x8_t off_loadfp16_0 (float16_t *a)
+ /*
+ **off_load16_0:
+ ** ...
+-** vldrh.16 q0, \[r0, #-2\]
++** vldrh.16 q[0-7], \[r0, #-2\]
+ ** ...
+ */
+ uint16x8_t off_load16_0 (uint16_t * a)
+@@ -84,7 +84,7 @@ uint16x8_t off_load16_0 (uint16_t * a)
+ /*
+ **off_load16_1:
+ ** ...
+-** vldrh.u32 q0, \[r0, #254\]
++** vldrh.u32 q[0-7], \[r0, #254\]
+ ** ...
+ */
+ uint32x4_t off_load16_1 (uint16_t * a)
+@@ -95,7 +95,7 @@ uint32x4_t off_load16_1 (uint16_t * a)
+ /*
+ **not_off_load16_0:
+ ** ...
+-** vldrh.16 q0, \[r[0-9]+\]
++** vldrh.16 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ int16x8_t not_off_load16_0 (int8_t * a)
+@@ -106,7 +106,7 @@ int16x8_t not_off_load16_0 (int8_t * a)
+ /*
+ **not_off_load16_1:
+ ** ...
+-** vldrh.u32 q0, \[r[0-9]+\]
++** vldrh.u32 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ uint32x4_t not_off_load16_1 (uint16_t * a)
+@@ -117,7 +117,7 @@ uint32x4_t not_off_load16_1 (uint16_t * a)
+ /*
+ **off_loadfp32_0:
+ ** ...
+-** vldrw.32 q0, \[r0, #24\]
++** vldrw.32 q[0-7], \[r0, #24\]
+ ** ...
+ */
+ float32x4_t off_loadfp32_0 (float32_t *a)
+@@ -128,7 +128,7 @@ float32x4_t off_loadfp32_0 (float32_t *a)
+ /*
+ **off_load32_0:
+ ** ...
+-** vldrw.32 q0, \[r0, #4\]
++** vldrw.32 q[0-7], \[r0, #4\]
+ ** ...
+ */
+ uint32x4_t off_load32_0 (uint32_t * a)
+@@ -139,7 +139,7 @@ uint32x4_t off_load32_0 (uint32_t * a)
+ /*
+ **off_load32_1:
+ ** ...
+-** vldrw.32 q0, \[r0, #-508\]
++** vldrw.32 q[0-7], \[r0, #-508\]
+ ** ...
+ */
+ int32x4_t off_load32_1 (int32_t * a)
+@@ -149,7 +149,7 @@ int32x4_t off_load32_1 (int32_t * a)
+ /*
+ **pre_load8_0:
+ ** ...
+-** vldrb.8 q[0-9]+, \[r0, #16\]!
++** vldrb.8 q[0-7], \[r0, #16\]!
+ ** ...
+ */
+ int8_t* pre_load8_0 (int8_t * a, int8x16_t *v)
+@@ -162,7 +162,7 @@ int8_t* pre_load8_0 (int8_t * a, int8x16_t *v)
+ /*
+ **pre_load8_1:
+ ** ...
+-** vldrb.u16 q[0-9]+, \[r0, #4\]!
++** vldrb.u16 q[0-7], \[r0, #4\]!
+ ** ...
+ */
+ uint8_t* pre_load8_1 (uint8_t * a, uint16x8_t *v)
+@@ -175,7 +175,7 @@ uint8_t* pre_load8_1 (uint8_t * a, uint16x8_t *v)
+ /*
+ **pre_loadfp16_0:
+ ** ...
+-** vldrh.16 q[0-9]+, \[r0, #128\]!
++** vldrh.16 q[0-7], \[r0, #128\]!
+ ** ...
+ */
+ float16_t* pre_loadfp16_0 (float16_t *a, float16x8_t *v)
+@@ -188,7 +188,7 @@ float16_t* pre_loadfp16_0 (float16_t *a, float16x8_t *v)
+ /*
+ **pre_load16_0:
+ ** ...
+-** vldrh.16 q[0-9]+, \[r0, #-254\]!
++** vldrh.16 q[0-7], \[r0, #-254\]!
+ ** ...
+ */
+ int16_t* pre_load16_0 (int16_t * a, int16x8_t *v)
+@@ -201,7 +201,7 @@ int16_t* pre_load16_0 (int16_t * a, int16x8_t *v)
+ /*
+ **pre_load16_1:
+ ** ...
+-** vldrh.s32 q[0-9]+, \[r0, #52\]!
++** vldrh.s32 q[0-7], \[r0, #52\]!
+ ** ...
+ */
+ int16_t* pre_load16_1 (int16_t * a, int32x4_t *v)
+@@ -214,7 +214,7 @@ int16_t* pre_load16_1 (int16_t * a, int32x4_t *v)
+ /*
+ **pre_loadfp32_0:
+ ** ...
+-** vldrw.32 q[0-9]+, \[r0, #-72\]!
++** vldrw.32 q[0-7], \[r0, #-72\]!
+ ** ...
+ */
+ float32_t* pre_loadfp32_0 (float32_t *a, float32x4_t *v)
+@@ -228,7 +228,7 @@ float32_t* pre_loadfp32_0 (float32_t *a, float32x4_t *v)
+ /*
+ **pre_load32_0:
+ ** ...
+-** vldrw.32 q[0-9]+, \[r0, #-4\]!
++** vldrw.32 q[0-7], \[r0, #-4\]!
+ ** ...
+ */
+ uint32_t* pre_load32_0 (uint32_t * a, uint32x4_t *v)
+@@ -242,7 +242,7 @@ uint32_t* pre_load32_0 (uint32_t * a, uint32x4_t *v)
+ /*
+ **post_load8_0:
+ ** ...
+-** vldrb.8 q[0-9]+, \[r0\], #26
++** vldrb.8 q[0-7], \[r0\], #26
+ ** ...
+ */
+ uint8_t* post_load8_0 (uint8_t * a, uint8x16_t *v)
+@@ -255,7 +255,7 @@ uint8_t* post_load8_0 (uint8_t * a, uint8x16_t *v)
+ /*
+ **post_load8_1:
+ ** ...
+-** vldrb.s16 q[0-9]+, \[r0\], #-1
++** vldrb.s16 q[0-7], \[r0\], #-1
+ ** ...
+ */
+ int8_t* post_load8_1 (int8_t * a, int16x8_t *v)
+@@ -268,7 +268,7 @@ int8_t* post_load8_1 (int8_t * a, int16x8_t *v)
+ /*
+ **post_load8_2:
+ ** ...
+-** vldrb.8 q[0-9]+, \[r0\], #26
++** vldrb.8 q[0-7], \[r0\], #26
+ ** ...
+ */
+ uint8_t* post_load8_2 (uint8_t * a, uint8x16_t *v)
+@@ -281,7 +281,7 @@ uint8_t* post_load8_2 (uint8_t * a, uint8x16_t *v)
+ /*
+ **post_load8_3:
+ ** ...
+-** vldrb.s16 q[0-9]+, \[r0\], #-1
++** vldrb.s16 q[0-7], \[r0\], #-1
+ ** ...
+ */
+ int8_t* post_load8_3 (int8_t * a, int16x8_t *v)
+@@ -294,7 +294,7 @@ int8_t* post_load8_3 (int8_t * a, int16x8_t *v)
+ /*
+ **post_loadfp16_0:
+ ** ...
+-** vldrh.16 q[0-9]+, \[r0\], #-24
++** vldrh.16 q[0-7], \[r0\], #-24
+ ** ...
+ */
+ float16_t* post_loadfp16_0 (float16_t *a, float16x8_t *v)
+@@ -307,7 +307,7 @@ float16_t* post_loadfp16_0 (float16_t *a, float16x8_t *v)
+ /*
+ **post_load16_0:
+ ** ...
+-** vldrh.16 q[0-9]+, \[r0\], #-126
++** vldrh.16 q[0-7], \[r0\], #-126
+ ** ...
+ */
+ uint16_t* post_load16_0 (uint16_t * a, uint16x8_t *v)
+@@ -320,7 +320,7 @@ uint16_t* post_load16_0 (uint16_t * a, uint16x8_t *v)
+ /*
+ **post_load16_1:
+ ** ...
+-** vldrh.u32 q[0-9]+, \[r0\], #16
++** vldrh.u32 q[0-7], \[r0\], #16
+ ** ...
+ */
+ uint16_t* post_load16_1 (uint16_t * a, uint32x4_t *v)
+@@ -333,7 +333,7 @@ uint16_t* post_load16_1 (uint16_t * a, uint32x4_t *v)
+ /*
+ **post_loadfp32_0:
+ ** ...
+-** vldrw.32 q[0-9]+, \[r0\], #4
++** vldrw.32 q[0-7], \[r0\], #4
+ ** ...
+ */
+ float32_t* post_loadfp32_0 (float32_t *a, float32x4_t *v)
+@@ -346,7 +346,7 @@ float32_t* post_loadfp32_0 (float32_t *a, float32x4_t *v)
+ /*
+ **post_load32_0:
+ ** ...
+-** vldrw.32 q[0-9]+, \[r0\], #-16
++** vldrw.32 q[0-7], \[r0\], #-16
+ ** ...
+ */
+ int32_t* post_load32_0 (int32_t * a, int32x4_t *v)
+--- a/src/gcc/testsuite/gcc.target/arm/mve/mve_store_memory_modes.c
++++ b/src/gcc/testsuite/gcc.target/arm/mve/mve_store_memory_modes.c
+@@ -7,7 +7,7 @@
+ /*
+ **off_store8_0:
+ ** ...
+-** vstrb.8 q0, \[r0, #16\]
++** vstrb.8 q[0-7], \[r0, #16\]
+ ** ...
+ */
+ uint8_t *off_store8_0 (uint8_t * a, uint8x16_t v)
+@@ -19,7 +19,7 @@ uint8_t *off_store8_0 (uint8_t * a, uint8x16_t v)
+ /*
+ **off_store8_1:
+ ** ...
+-** vstrb.16 q0, \[r0, #-1\]
++** vstrb.16 q[0-7], \[r0, #-1\]
+ ** ...
+ */
+ int8_t *off_store8_1 (int8_t * a, int16x8_t v)
+@@ -31,7 +31,7 @@ int8_t *off_store8_1 (int8_t * a, int16x8_t v)
+ /*
+ **off_store8_2:
+ ** ...
+-** vstrb.32 q0, \[r0, #-127\]
++** vstrb.32 q[0-7], \[r0, #-127\]
+ ** ...
+ */
+ uint8_t *off_store8_2 (uint8_t * a, uint32x4_t v)
+@@ -43,7 +43,7 @@ uint8_t *off_store8_2 (uint8_t * a, uint32x4_t v)
+ /*
+ **off_store8_3:
+ ** ...
+-** vstrb.8 q0, \[r0, #127\]
++** vstrb.8 q[0-7], \[r0, #127\]
+ ** ...
+ */
+ int8_t *off_store8_3 (int8_t * a, int8x16_t v)
+@@ -55,7 +55,7 @@ int8_t *off_store8_3 (int8_t * a, int8x16_t v)
+ /*
+ **not_off_store8_0:
+ ** ...
+-** vstrb.8 q0, \[r[0-9]+\]
++** vstrb.8 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ uint8_t *not_off_store8_0 (uint8_t * a, uint8x16_t v)
+@@ -67,7 +67,7 @@ uint8_t *not_off_store8_0 (uint8_t * a, uint8x16_t v)
+ /*
+ **off_storefp16_0:
+ ** ...
+-** vstrh.16 q0, \[r0, #250\]
++** vstrh.16 q[0-7], \[r0, #250\]
+ ** ...
+ */
+ float16_t *off_storefp16_0 (float16_t *a, float16x8_t v)
+@@ -79,7 +79,7 @@ float16_t *off_storefp16_0 (float16_t *a, float16x8_t v)
+ /*
+ **off_store16_0:
+ ** ...
+-** vstrh.16 q0, \[r0, #4\]
++** vstrh.16 q[0-7], \[r0, #4\]
+ ** ...
+ */
+ int16_t *off_store16_0 (int16_t * a, int16x8_t v)
+@@ -91,7 +91,7 @@ int16_t *off_store16_0 (int16_t * a, int16x8_t v)
+ /*
+ **off_store16_1:
+ ** ...
+-** vstrh.32 q0, \[r0, #-254\]
++** vstrh.32 q[0-7], \[r0, #-254\]
+ ** ...
+ */
+ int16_t *off_store16_1 (int16_t * a, int32x4_t v)
+@@ -103,7 +103,7 @@ int16_t *off_store16_1 (int16_t * a, int32x4_t v)
+ /*
+ **not_off_store16_0:
+ ** ...
+-** vstrh.16 q0, \[r[0-9]+\]
++** vstrh.16 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ uint8_t *not_off_store16_0 (uint8_t * a, uint16x8_t v)
+@@ -115,7 +115,7 @@ uint8_t *not_off_store16_0 (uint8_t * a, uint16x8_t v)
+ /*
+ **not_off_store16_1:
+ ** ...
+-** vstrh.32 q0, \[r[0-9]+\]
++** vstrh.32 q[0-7], \[r[0-7]+\]
+ ** ...
+ */
+ int16_t *not_off_store16_1 (int16_t * a, int32x4_t v)
+@@ -127,7 +127,7 @@ int16_t *not_off_store16_1 (int16_t * a, int32x4_t v)
+ /*
+ **off_storefp32_0:
+ ** ...
+-** vstrw.32 q0, \[r0, #-412\]
++** vstrw.32 q[0-7], \[r0, #-412\]
+ ** ...
+ */
+ float32_t *off_storefp32_0 (float32_t *a, float32x4_t v)
+@@ -139,7 +139,7 @@ float32_t *off_storefp32_0 (float32_t *a, float32x4_t v)
+ /*
+ **off_store32_0:
+ ** ...
+-** vstrw.32 q0, \[r0, #-4\]
++** vstrw.32 q[0-7], \[r0, #-4\]
+ ** ...
+ */
+ int32_t *off_store32_0 (int32_t * a, int32x4_t v)
+@@ -151,7 +151,7 @@ int32_t *off_store32_0 (int32_t * a, int32x4_t v)
+ /*
+ **off_store32_1:
+ ** ...
+-** vstrw.32 q0, \[r0, #508\]
++** vstrw.32 q[0-7], \[r0, #508\]
+ ** ...
+ */
+ uint32_t *off_store32_1 (uint32_t * a, uint32x4_t v)
+@@ -163,7 +163,7 @@ uint32_t *off_store32_1 (uint32_t * a, uint32x4_t v)
+ /*
+ **pre_store8_0:
+ ** ...
+-** vstrb.8 q[0-9]+, \[r0, #-16\]!
++** vstrb.8 q[0-7], \[r0, #-16\]!
+ ** ...
+ */
+ uint8_t* pre_store8_0 (uint8_t * a, uint8x16_t v)
+@@ -176,7 +176,7 @@ uint8_t* pre_store8_0 (uint8_t * a, uint8x16_t v)
+ /*
+ **pre_store8_1:
+ ** ...
+-** vstrb.16 q[0-9]+, \[r0, #4\]!
++** vstrb.16 q[0-7], \[r0, #4\]!
+ ** ...
+ */
+ int8_t* pre_store8_1 (int8_t * a, int16x8_t v)
+@@ -189,7 +189,7 @@ int8_t* pre_store8_1 (int8_t * a, int16x8_t v)
+ /*
+ **pre_storefp16_0:
+ ** ...
+-** vstrh.16 q0, \[r0, #8\]!
++** vstrh.16 q[0-7], \[r0, #8\]!
+ ** ...
+ */
+ float16_t *pre_storefp16_0 (float16_t *a, float16x8_t v)
+@@ -202,7 +202,7 @@ float16_t *pre_storefp16_0 (float16_t *a, float16x8_t v)
+ /*
+ **pre_store16_0:
+ ** ...
+-** vstrh.16 q[0-9]+, \[r0, #254\]!
++** vstrh.16 q[0-7], \[r0, #254\]!
+ ** ...
+ */
+ uint16_t* pre_store16_0 (uint16_t * a, uint16x8_t v)
+@@ -215,7 +215,7 @@ uint16_t* pre_store16_0 (uint16_t * a, uint16x8_t v)
+ /*
+ **pre_store16_1:
+ ** ...
+-** vstrh.32 q[0-9]+, \[r0, #-52\]!
++** vstrh.32 q[0-7], \[r0, #-52\]!
+ ** ...
+ */
+ int16_t* pre_store16_1 (int16_t * a, int32x4_t v)
+@@ -228,7 +228,7 @@ int16_t* pre_store16_1 (int16_t * a, int32x4_t v)
+ /*
+ **pre_storefp32_0:
+ ** ...
+-** vstrw.32 q0, \[r0, #-4\]!
++** vstrw.32 q[0-7], \[r0, #-4\]!
+ ** ...
+ */
+ float32_t *pre_storefp32_0 (float32_t *a, float32x4_t v)
+@@ -241,7 +241,7 @@ float32_t *pre_storefp32_0 (float32_t *a, float32x4_t v)
+ /*
+ **pre_store32_0:
+ ** ...
+-** vstrw.32 q[0-9]+, \[r0, #4\]!
++** vstrw.32 q[0-7], \[r0, #4\]!
+ ** ...
+ */
+ int32_t* pre_store32_0 (int32_t * a, int32x4_t v)
+@@ -255,7 +255,7 @@ int32_t* pre_store32_0 (int32_t * a, int32x4_t v)
+ /*
+ **post_store8_0:
+ ** ...
+-** vstrb.8 q[0-9]+, \[r0\], #-26
++** vstrb.8 q[0-7], \[r0\], #-26
+ ** ...
+ */
+ int8_t* post_store8_0 (int8_t * a, int8x16_t v)
+@@ -268,7 +268,7 @@ int8_t* post_store8_0 (int8_t * a, int8x16_t v)
+ /*
+ **post_store8_1:
+ ** ...
+-** vstrb.16 q[0-9]+, \[r0\], #1
++** vstrb.16 q[0-7], \[r0\], #1
+ ** ...
+ */
+ uint8_t* post_store8_1 (uint8_t * a, uint16x8_t v)
+@@ -281,7 +281,7 @@ uint8_t* post_store8_1 (uint8_t * a, uint16x8_t v)
+ /*
+ **post_store8_2:
+ ** ...
+-** vstrb.8 q[0-9]+, \[r0\], #-26
++** vstrb.8 q[0-7], \[r0\], #-26
+ ** ...
+ */
+ int8_t* post_store8_2 (int8_t * a, int8x16_t v)
+@@ -294,7 +294,7 @@ int8_t* post_store8_2 (int8_t * a, int8x16_t v)
+ /*
+ **post_store8_3:
+ ** ...
+-** vstrb.16 q[0-9]+, \[r0\], #7
++** vstrb.16 q[0-7], \[r0\], #7
+ ** ...
+ */
+ uint8_t* post_store8_3 (uint8_t * a, uint16x8_t v)
+@@ -307,7 +307,7 @@ uint8_t* post_store8_3 (uint8_t * a, uint16x8_t v)
+ /*
+ **post_storefp16_0:
+ ** ...
+-** vstrh.16 q[0-9]+, \[r0\], #-16
++** vstrh.16 q[0-7], \[r0\], #-16
+ ** ...
+ */
+ float16_t *post_storefp16_0 (float16_t *a, float16x8_t v)
+@@ -320,7 +320,7 @@ float16_t *post_storefp16_0 (float16_t *a, float16x8_t v)
+ /*
+ **post_store16_0:
+ ** ...
+-** vstrh.16 q[0-9]+, \[r0\], #126
++** vstrh.16 q[0-7], \[r0\], #126
+ ** ...
+ */
+ int16_t* post_store16_0 (int16_t * a, int16x8_t v)
+@@ -333,7 +333,7 @@ int16_t* post_store16_0 (int16_t * a, int16x8_t v)
+ /*
+ **post_store16_1:
+ ** ...
+-** vstrh.32 q[0-9]+, \[r0\], #-16
++** vstrh.32 q[0-7], \[r0\], #-16
+ ** ...
+ */
+ uint16_t* post_store16_1 (uint16_t * a, uint32x4_t v)
+@@ -346,7 +346,7 @@ uint16_t* post_store16_1 (uint16_t * a, uint32x4_t v)
+ /*
+ **post_storefp32_0:
+ ** ...
+-** vstrw.32 q[0-9]+, \[r0\], #-16
++** vstrw.32 q[0-7], \[r0\], #-16
+ ** ...
+ */
+ float32_t* post_storefp32_0 (float32_t * a, float32x4_t v)
+@@ -359,7 +359,7 @@ float32_t* post_storefp32_0 (float32_t * a, float32x4_t v)
+ /*
+ **post_store32_0:
+ ** ...
+-** vstrw.32 q[0-9]+, \[r0\], #16
++** vstrw.32 q[0-7], \[r0\], #16
+ ** ...
+ */
+ int32_t* post_store32_0 (int32_t * a, int32x4_t v)
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/mve_vadcq_vsbcq_fpscr_overwrite.c
+@@ -0,0 +1,67 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include <arm_mve.h>
++
++volatile int32x4_t c1;
++volatile uint32x4_t c2;
++int *carry;
++
++int
++main ()
++{
++ int32x4_t a1 = vcreateq_s32 (0, 0);
++ int32x4_t b1 = vcreateq_s32 (0, 0);
++ int32x4_t inactive1 = vcreateq_s32 (0, 0);
++
++ uint32x4_t a2 = vcreateq_u32 (0, 0);
++ uint32x4_t b2 = vcreateq_u32 (0, 0);
++ uint32x4_t inactive2 = vcreateq_u32 (0, 0);
++
++ mve_pred16_t p = 0xFFFF;
++ (*carry) = 0xFFFFFFFF;
++
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c1 = vadcq (a1, b1, carry);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c2 = vadcq (a2, b2, carry);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c1 = vsbcq (a1, b1, carry);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c2 = vsbcq (a2, b2, carry);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c1 = vadcq_m (inactive1, a1, b1, carry, p);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c2 = vadcq_m (inactive2, a2, b2, carry, p);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c1 = vsbcq_m (inactive1, a1, b1, carry, p);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++ (*carry) = 0xFFFFFFFF;
++ __builtin_arm_set_fpscr_nzcvqc (0);
++ c2 = vsbcq_m (inactive2, a2, b2, carry, p);
++ if (__builtin_arm_get_fpscr_nzcvqc () & !0x20000000)
++ __builtin_abort ();
++
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-1-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2 --save-temps" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-1.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-1.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint8x16_t
++#define INTRINSIC vstrbq_u8
++#define INTRINSIC_P vstrbq_p_u8
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-10-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-10.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-10.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int32x4_t
++#define INTRINSIC vstrhq_s32
++#define INTRINSIC_P vstrhq_p_s32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-11-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-11.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-11.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint32x4_t
++#define INTRINSIC vstrwq_u32
++#define INTRINSIC_P vstrwq_p_u32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-12-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-12.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-12.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int32x4_t
++#define INTRINSIC vstrwq_s32
++#define INTRINSIC_P vstrwq_p_s32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-13-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++
++#include "pr108177-13.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-13.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE float16x8_t
++#define INTRINSIC vstrhq_f16
++#define INTRINSIC_P vstrhq_p_f16
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-14-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++
++#include "pr108177-14.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-14.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve_fp } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrwt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE float32x4_t
++#define INTRINSIC vstrwq_f32
++#define INTRINSIC_P vstrwq_p_f32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-2-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-2.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-2.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.8 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int8x16_t
++#define INTRINSIC vstrbq_s8
++#define INTRINSIC_P vstrbq_p_s8
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-3-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-3.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-3.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint16x8_t
++#define INTRINSIC vstrbq_u16
++#define INTRINSIC_P vstrbq_p_u16
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-4-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-4.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-4.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int16x8_t
++#define INTRINSIC vstrbq_s16
++#define INTRINSIC_P vstrbq_p_s16
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-5-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-5.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-5.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint32x4_t
++#define INTRINSIC vstrbq_u32
++#define INTRINSIC_P vstrbq_p_u32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-6-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-6.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-6.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrbt.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int32x4_t
++#define INTRINSIC vstrbq_s32
++#define INTRINSIC_P vstrbq_p_s32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-7-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-7.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-7.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint16x8_t
++#define INTRINSIC vstrhq_u16
++#define INTRINSIC_P vstrhq_p_u16
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-8-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-8.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-8.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrht.16 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE int16x8_t
++#define INTRINSIC vstrhq_s16
++#define INTRINSIC_P vstrhq_p_s16
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-9-run.c
+@@ -0,0 +1,6 @@
++/* { dg-do run } */
++/* { dg-require-effective-target arm_mve_hw } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++
++#include "pr108177-9.c"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-9.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_v8_1m_mve_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_v8_1m_mve } */
++/* { dg-final { check-function-bodies "**" "" "" } } */
++
++/*
++** test:
++**...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++** vstrht.32 q[0-9]+, \[(?:ip|fp|r[0-9]+)\]
++**...
++*/
++
++#define TYPE uint32x4_t
++#define INTRINSIC vstrhq_u32
++#define INTRINSIC_P vstrhq_p_u32
++
++#include "pr108177.x"
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177-main.x
+@@ -0,0 +1,31 @@
++#include <arm_mve.h>
++extern void abort (void);
++
++__attribute__ ((noipa)) void
++write_expected (uint32x4_t v, void *a)
++{
++ TYPE _v = (TYPE) v;
++ INTRINSIC (a, _v);
++}
++
++void test (uint32x4_t, void *, mve_pred16_t, mve_pred16_t);
++
++int main(void)
++{
++ uint32x4_t v = {0, 1, 2, 3};
++ uint32_t actual[] = {0, 0, 0, 0};
++ uint32_t expected[] = {0, 0, 0, 0};
++
++ write_expected (v, &(expected[0]));
++
++ mve_pred16_t p1 = 0xff00;
++ mve_pred16_t p2 = 0x00ff;
++
++ test (v, (void *)&actual[0], p1, p2);
++
++ if (__builtin_memcmp (&actual[0], &expected[0], 16) != 0)
++ abort ();
++
++ return 0;
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/mve/pr108177.x
+@@ -0,0 +1,9 @@
++#include "pr108177-main.x"
++
++__attribute__ ((noipa)) void
++test (uint32x4_t v, void *a, mve_pred16_t p1, mve_pred16_t p2)
++{
++ TYPE _v = (TYPE) v;
++ INTRINSIC_P (a, _v, p1);
++ INTRINSIC_P (a, _v, p2);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/pr109939.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_sat_ok } */
++/* { dg-add-options arm_sat } */
++/* { dg-additional-options "-O -Wall -Wconversion" } */
++
++#include <arm_acle.h>
++
++int dbg_ssat_out;
++int dbg_ssat_in;
++
++void test_arm_ssat(void)
++{
++ dbg_ssat_out = __ssat(dbg_ssat_in, 16);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/arm/pure-code/pr109800.c
+@@ -0,0 +1,4 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_hard_ok } */
++/* { dg-options "-O2 -march=armv7-m -mfloat-abi=hard -mfpu=fpv4-sp-d16 -mbig-endian -mpure-code" } */
++double f() { return 5.0; }
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-compare-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-compare-1.c
+@@ -50,31 +50,31 @@ TEST_TYPE (vs32, __INT32_TYPE__, COMPARE_REG_AND_ZERO, 16)
+ TEST_TYPE (vu32, __UINT32_TYPE__, COMPARE_REG, 16)
+
+ /* { 8 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i8 eq, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i8 ne, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 lt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 le, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 gt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 ge, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u8 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u8 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i8\teq, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i8\tne, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tlt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tle, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tgt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tge, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u8\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u8\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+
+ /* { 16 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i16 eq, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i16 ne, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 lt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 le, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 gt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 ge, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u16 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u16 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i16\teq, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i16\tne, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tlt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tle, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tgt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tge, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u16\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u16\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+
+ /* { 32 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i32 eq, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i32 ne, q[0-9]+, q[0-9]+\n} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 lt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 le, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 gt, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 ge, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u32 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u32 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i32\teq, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i32\tne, q[0-9]+, q[0-9]+\n} 4 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tlt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tle, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tgt, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tge, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u32\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u32\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-compare-scalar-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-compare-scalar-1.c
+@@ -39,31 +39,31 @@ TEST_TYPE (vs32, __INT32_TYPE__, 16)
+ TEST_TYPE (vu32, __UINT32_TYPE__, 16)
+
+ /* { 8 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i8 eq, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i8 ne, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 lt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 le, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 gt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s8 ge, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u8 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u8 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i8\teq, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i8\tne, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tlt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tle, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tgt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s8\tge, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u8\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u8\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+
+ /* { 16 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i16 eq, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i16 ne, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 lt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 le, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 gt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s16 ge, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u16 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u16 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i16\teq, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i16\tne, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tlt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tle, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tgt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s16\tge, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u16\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u16\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+
+ /* { 32 bits } x { eq, ne, lt, le, gt, ge, hi, cs }.
+-/* { dg-final { scan-assembler-times {\tvcmp.i32 eq, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i32 ne, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 lt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 le, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 gt, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s32 ge, q[0-9]+, q[0-9]+\n} 1 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u32 hi, q[0-9]+, q[0-9]+\n} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u32 cs, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i32\teq, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i32\tne, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tlt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tle, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tgt, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s32\tge, q[0-9]+, q[0-9]+\n} 1 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u32\thi, q[0-9]+, q[0-9]+\n} 2 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u32\tcs, q[0-9]+, q[0-9]+\n} 2 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vabs.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vabs.c
+@@ -38,7 +38,7 @@ FUNC(f, float, 16, 8, vabs)
+ integer optimizations actually generate a call to memmove, the other ones a
+ 'vabs'. */
+ /* { dg-final { scan-assembler-times {vabs.s[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
+-/* { dg-final { scan-assembler-times {vabs.f[0-9]+ q[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vabs.f[0-9]+\tq[0-9]+, q[0-9]+} 2 } } */
+ /* { dg-final { scan-assembler-times {vldr[bhw].[0-9]+\tq[0-9]+} 5 } } */
+ /* { dg-final { scan-assembler-times {vstr[bhw].[0-9]+\tq[0-9]+} 5 } } */
+ /* { dg-final { scan-assembler-times {memmove} 3 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vadd-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vadd-1.c
+@@ -22,9 +22,9 @@ FUNC(u, uint, 16, 8, +, vadd)
+ FUNC(s, int, 8, 16, +, vadd)
+ FUNC(u, uint, 8, 16, +, vadd)
+
+-/* { dg-final { scan-assembler-times {vadd\.i32 q[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
+-/* { dg-final { scan-assembler-times {vadd\.i16 q[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
+-/* { dg-final { scan-assembler-times {vadd\.i8 q[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vadd\.i32\tq[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vadd\.i16\tq[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vadd\.i8\tq[0-9]+, q[0-9]+, q[0-9]+} 2 } } */
+
+ void test_vadd_f32 (float * dest, float * a, float * b) {
+ int i;
+@@ -32,7 +32,7 @@ void test_vadd_f32 (float * dest, float * a, float * b) {
+ dest[i] = a[i] + b[i];
+ }
+ }
+-/* { dg-final { scan-assembler-times {vadd\.f32 q[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
++/* { dg-final { scan-assembler-times {vadd\.f32\tq[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
+
+ void test_vadd_f16 (__fp16 * dest, __fp16 * a, __fp16 * b) {
+ int i;
+@@ -40,4 +40,4 @@ void test_vadd_f16 (__fp16 * dest, __fp16 * a, __fp16 * b) {
+ dest[i] = a[i] + b[i];
+ }
+ }
+-/* { dg-final { scan-assembler-times {vadd\.f16 q[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
++/* { dg-final { scan-assembler-times {vadd\.f16\tq[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vadd-scalar-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vadd-scalar-1.c
+@@ -24,9 +24,9 @@ FUNC_IMM(u, uint, 8, 16, +, vaddimm)
+
+ /* For the moment we do not select the T2 vadd variant operating on a scalar
+ final argument. */
+-/* { dg-final { scan-assembler-times {vadd\.i32 q[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
+-/* { dg-final { scan-assembler-times {vadd\.i16 q[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
+-/* { dg-final { scan-assembler-times {vadd\.i8 q[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vadd\.i32\tq[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vadd\.i16\tq[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vadd\.i8\tq[0-9]+, q[0-9]+, r[0-9]+} 2 { xfail *-*-* } } } */
+
+ void test_vaddimm_f32 (float * dest, float * a) {
+ int i;
+@@ -34,7 +34,7 @@ void test_vaddimm_f32 (float * dest, float * a) {
+ dest[i] = a[i] + 5.0;
+ }
+ }
+-/* { dg-final { scan-assembler-times {vadd\.f32 q[0-9]+, q[0-9]+, r[0-9]+} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vadd\.f32\tq[0-9]+, q[0-9]+, r[0-9]+} 1 { xfail *-*-* } } } */
+
+ /* Note that dest[i] = a[i] + 5.0f16 is not vectorized. */
+ void test_vaddimm_f16 (__fp16 * dest, __fp16 * a) {
+@@ -44,4 +44,4 @@ void test_vaddimm_f16 (__fp16 * dest, __fp16 * a) {
+ dest[i] = a[i] + b;
+ }
+ }
+-/* { dg-final { scan-assembler-times {vadd\.f16 q[0-9]+, q[0-9]+, r[0-9]+} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vadd\.f16\tq[0-9]+, q[0-9]+, r[0-9]+} 1 { xfail *-*-* } } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vclz.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vclz.c
+@@ -23,6 +23,6 @@ FUNC(u, uint, 8, clz)
+
+ /* 16 and 8-bit versions are not vectorized because they need pack/unpack
+ patterns since __builtin_clz uses 32-bit parameter and return value. */
+-/* { dg-final { scan-assembler-times {vclz\.i32 q[0-9]+, q[0-9]+} 2 } } */
+-/* { dg-final { scan-assembler-times {vclz\.i16 q[0-9]+, q[0-9]+} 2 { xfail *-*-* } } } */
+-/* { dg-final { scan-assembler-times {vclz\.i8 q[0-9]+, q[0-9]+} 2 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vclz\.i32\tq[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vclz\.i16\tq[0-9]+, q[0-9]+} 2 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {vclz\.i8\tq[0-9]+, q[0-9]+} 2 { xfail *-*-* } } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vcmp.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vcmp.c
+@@ -36,15 +36,15 @@ ALL_FUNCS(>=, vcmpge)
+
+ /* MVE has only 128-bit vectors, so we can vectorize only half of the
+ functions above. */
+-/* { dg-final { scan-assembler-times {\tvcmp.i[0-9]+ eq, q[0-9]+, q[0-9]+\n} 6 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.i[0-9]+ ne, q[0-9]+, q[0-9]+\n} 6 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i[0-9]+\teq, q[0-9]+, q[0-9]+\n} 6 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.i[0-9]+\tne, q[0-9]+, q[0-9]+\n} 6 } } */
+
+ /* lt, le, gt, ge apply to signed types, cs and hi to unsigned types. */
+ /* lt and le with unsigned types are replaced with the opposite condition, hence
+ the double number of matches for cs and hi. */
+-/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+ lt, q[0-9]+, q[0-9]+\n} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+ le, q[0-9]+, q[0-9]+\n} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+ gt, q[0-9]+, q[0-9]+\n} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+ ge, q[0-9]+, q[0-9]+\n} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u[0-9]+ cs, q[0-9]+, q[0-9]+\n} 6 } } */
+-/* { dg-final { scan-assembler-times {\tvcmp.u[0-9]+ hi, q[0-9]+, q[0-9]+\n} 6 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+\tlt, q[0-9]+, q[0-9]+\n} 3 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+\tle, q[0-9]+, q[0-9]+\n} 3 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+\tgt, q[0-9]+, q[0-9]+\n} 3 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.s[0-9]+\tge, q[0-9]+, q[0-9]+\n} 3 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u[0-9]+\tcs, q[0-9]+, q[0-9]+\n} 6 } } */
++/* { dg-final { scan-assembler-times {\tvcmp.u[0-9]+\thi, q[0-9]+, q[0-9]+\n} 6 } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vneg.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vneg.c
+@@ -45,8 +45,8 @@ FUNC(f, float, 16, 8, -, vneg)
+
+ /* MVE has only 128-bit vectors, so we can vectorize only half of the
+ functions above. */
+-/* { dg-final { scan-assembler-times {vneg.s[0-9]+ q[0-9]+, q[0-9]+} 6 } } */
+-/* { dg-final { scan-assembler-times {vneg.f[0-9]+ q[0-9]+, q[0-9]+} 2 } } */
++/* { dg-final { scan-assembler-times {vneg.s[0-9]+\tq[0-9]+, q[0-9]+} 6 } } */
++/* { dg-final { scan-assembler-times {vneg.f[0-9]+\tq[0-9]+, q[0-9]+} 2 } } */
+ /* { dg-final { scan-assembler-times {vldr[bhw].[0-9]+\tq[0-9]+} 8 } } */
+ /* { dg-final { scan-assembler-times {vstr[bhw].[0-9]+\tq[0-9]+} 8 } } */
+ /* { dg-final { scan-assembler-not {orr\tr[0-9]+, r[0-9]+, r[0-9]+} } } */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
+@@ -58,7 +58,7 @@ FUNC_IMM(u, uint, 8, 16, >>, vshrimm)
+ /* Vector right shifts use vneg and left shifts. */
+ /* { dg-final { scan-assembler-times {vshl.s[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
+ /* { dg-final { scan-assembler-times {vshl.u[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
+-/* { dg-final { scan-assembler-times {vneg.s[0-9]+ q[0-9]+, q[0-9]+} 6 } } */
++/* { dg-final { scan-assembler-times {vneg.s[0-9]+\tq[0-9]+, q[0-9]+} 6 } } */
+
+
+ /* Shift by immediate. */
+--- a/src/gcc/testsuite/gcc.target/arm/simd/pr101325.c
++++ b/src/gcc/testsuite/gcc.target/arm/simd/pr101325.c
+@@ -9,6 +9,6 @@ unsigned foo(int8x16_t v, int8x16_t w)
+ {
+ return vcmpeqq (v, w);
+ }
+-/* { dg-final { scan-assembler {\tvcmp.i8 eq} } } */
+-/* { dg-final { scan-assembler {\tvmrs\tr[0-9]+, P0} } } */
++/* { dg-final { scan-assembler {\tvcmp.i8\teq} } } */
++/* { dg-final { scan-assembler {\tvmrs\tr[0-9]+, p0} } } */
+ /* { dg-final { scan-assembler {\tuxth} } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/avr/pr82931.c
+@@ -0,0 +1,29 @@
++/* { dg-options "-Os" } */
++/* { dg-final { scan-assembler-times "bst" 4 } } */
++/* { dg-final { scan-assembler-times "bld" 4 } } */
++
++typedef __UINT8_TYPE__ uint8_t;
++typedef __UINT16_TYPE__ uint16_t;
++
++#define BitMask (1u << 14)
++#define Bit8Mask ((uint8_t) (1u << 4))
++
++void merge1_8 (uint8_t *dst, const uint8_t *src)
++{
++ *dst = (*src & Bit8Mask) | (*dst & ~ Bit8Mask);
++}
++
++void merge2_8 (uint8_t *dst, const uint8_t *src)
++{
++ *dst ^= (*dst ^ *src) & Bit8Mask;
++}
++
++void merge1_16 (uint16_t *dst, const uint16_t *src)
++{
++ *dst = (*src & BitMask) | (*dst & ~ BitMask);
++}
++
++void merge2_16 (uint16_t *dst, const uint16_t *src)
++{
++ *dst ^= (*dst ^ *src) & BitMask;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/avr/torture/pr105753.c
+@@ -0,0 +1,13 @@
++int digit_sum (unsigned long n)
++{
++ int sum = 0;
++
++ do
++ {
++ int x = n % 10;
++ n /= 10;
++ sum += x;
++ } while(n);
++
++ return sum;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/avr/torture/pr109650-1.c
+@@ -0,0 +1,63 @@
++/* { dg-do run } */
++/* { dg-options { -std=c99 } } */
++
++typedef _Bool bool;
++typedef __UINT8_TYPE__ uint8_t;
++
++static inline __attribute__((__always_inline__))
++bool func1a (bool p1, uint8_t p2)
++{
++ if (p1)
++ return p2 <= 8;
++ return p2 <= 2;
++}
++
++__attribute__((__noinline__, __noclone__))
++bool func1b (bool p1, uint8_t p2)
++{
++ return func1a (p1, p2);
++}
++
++static inline __attribute__((__always_inline__))
++bool func2a (bool p1, unsigned p2)
++{
++ if (p1)
++ return p2 <= 8;
++ return p2 <= 2;
++}
++
++__attribute__((__noinline__, __noclone__))
++bool func2b (bool p1, unsigned p2)
++{
++ return func2a (p1, p2);
++}
++
++void test1 (void)
++{
++ if (func1a (0, 1) != func1b (0, 1)) __builtin_abort();
++ if (func1a (0, 2) != func1b (0, 2)) __builtin_abort();
++ if (func1a (0, 3) != func1b (0, 3)) __builtin_abort();
++
++ if (func1a (1, 7) != func1b (1, 7)) __builtin_abort();
++ if (func1a (1, 8) != func1b (1, 8)) __builtin_abort();
++ if (func1a (1, 9) != func1b (1, 9)) __builtin_abort();
++}
++
++void test2 (void)
++{
++ if (func2a (0, 1) != func2b (0, 1)) __builtin_abort();
++ if (func2a (0, 2) != func2b (0, 2)) __builtin_abort();
++ if (func2a (0, 3) != func2b (0, 3)) __builtin_abort();
++
++ if (func2a (1, 7) != func2b (1, 7)) __builtin_abort();
++ if (func2a (1, 8) != func2b (1, 8)) __builtin_abort();
++ if (func2a (1, 9) != func2b (1, 9)) __builtin_abort();
++}
++
++int main (void)
++{
++ test1();
++ test2();
++
++ __builtin_exit (0);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/avr/torture/pr109650-2.c
+@@ -0,0 +1,79 @@
++/* { dg-do run } */
++
++typedef __UINT8_TYPE__ uint8_t;
++
++#define AI static __inline__ __attribute__((__always_inline__))
++#define NI __attribute__((__noinline__,__noclone__))
++
++AI uint8_t func1_eq (uint8_t c, unsigned x)
++{
++ if (x == c)
++ return 1;
++ return 0;
++}
++
++AI uint8_t func1_ne (uint8_t c, unsigned x)
++{
++ if (x != c)
++ return 1;
++ return 0;
++}
++
++AI uint8_t func1_ltu (uint8_t c, unsigned x)
++{
++ if (x < c)
++ return 1;
++ return 0;
++}
++
++AI uint8_t func1_leu (uint8_t c, unsigned x)
++{
++ if (x <= c)
++ return 1;
++ return 0;
++}
++
++AI uint8_t func1_gtu (uint8_t c, unsigned x)
++{
++ if (x > c)
++ return 1;
++ return 0;
++}
++
++AI uint8_t func1_geu (uint8_t c, unsigned x)
++{
++ if (x >= c)
++ return 1;
++ return 0;
++}
++
++NI uint8_t func2_eq (uint8_t c, unsigned x) { return func1_eq (c, x); }
++NI uint8_t func2_ne (uint8_t c, unsigned x) { return func1_ne (c, x); }
++NI uint8_t func2_ltu (uint8_t c, unsigned x) { return func1_ltu (c, x); }
++NI uint8_t func2_leu (uint8_t c, unsigned x) { return func1_leu (c, x); }
++NI uint8_t func2_gtu (uint8_t c, unsigned x) { return func1_gtu (c, x); }
++NI uint8_t func2_geu (uint8_t c, unsigned x) { return func1_geu (c, x); }
++
++AI void test4 (uint8_t c, unsigned x)
++{
++ if (func2_eq (c, x) != func1_eq (c, x)) __builtin_abort();
++ if (func2_ne (c, x) != func1_ne (c, x)) __builtin_abort();
++ if (func2_ltu (c, x) != func1_ltu (c, x)) __builtin_abort();
++ if (func2_leu (c, x) != func1_leu (c, x)) __builtin_abort();
++ if (func2_gtu (c, x) != func1_gtu (c, x)) __builtin_abort();
++ if (func2_geu (c, x) != func1_geu (c, x)) __builtin_abort();
++}
++
++int main (void)
++{
++ test4 (127, 127);
++ test4 (127, 128);
++ test4 (128, 127);
++
++ test4 (0x42, 0x142);
++ test4 (0x0, 0x100);
++ test4 (0x0, 0x0);
++ test4 (0x0, 0x1);
++
++ __builtin_exit (0);
++}
+--- a/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c
+@@ -16,5 +16,6 @@ foo ()
+ _mm256_zeroupper ();
+ }
+
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 5 { target { ! ia32 } } } } */
+ /* { dg-final { scan-assembler-times "\\*avx_vzeroall" 1 } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O0 -mavx -mtune=generic -mvzeroupper -dp" } */
++
++#include <immintrin.h>
++
++extern __m256 x, y;
++
++void
++foo ()
++{
++ x = y;
++}
++
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 } } */
+--- a/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c
+@@ -12,4 +12,5 @@ foo ()
+ _mm256_zeroupper ();
+ }
+
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 2 { target { ! ia32 } } } } */
+--- a/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c
+@@ -15,4 +15,5 @@ foo ()
+ _mm256_zeroupper ();
+ }
+
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 5 { target { ! ia32 } } } } */
+--- a/src/gcc/testsuite/gcc.target/i386/avx2-gather-2.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx2-gather-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -fdump-tree-vect-details -march=skylake" } */
++/* { dg-options "-O3 -fdump-tree-vect-details -march=skylake -mtune=haswell" } */
+
+ #include "avx2-gather-1.c"
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx2-gather-6.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx2-gather-6.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx2 -fno-common -fdump-tree-vect-details -mtune=skylake" } */
++/* { dg-options "-O3 -mavx2 -fno-common -fdump-tree-vect-details -mtune=haswell" } */
+
+ #include "avx2-gather-5.c"
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512f-pr88464-1.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512f-pr88464-1.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512f -mprefer-vector-width=512 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512f -mprefer-vector-width=512 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 64 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512f-pr88464-5.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512f-pr88464-5.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512f -mprefer-vector-width=512 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512f -mprefer-vector-width=512 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 64 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-1.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-1.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=256 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=256 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 32 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-11.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-11.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=128 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=128 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 16 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-3.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-3.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=128 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=128 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 16 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+--- a/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-9.c
++++ b/src/gcc/testsuite/gcc.target/i386/avx512vl-pr88464-9.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/88464 */
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=256 -mtune=skylake-avx512 -fdump-tree-vect-details" } */
++/* { dg-options "-O3 -mavx512vl -mprefer-vector-width=256 -mtune=haswell -fdump-tree-vect-details" } */
+ /* { dg-final { scan-tree-dump-times "loop vectorized using 32 byte vectors" 4 "vect" } } */
+ /* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/mvc17.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-require-ifunc "" } */
++/* { dg-options "-O2 -march=x86-64" } */
++/* { dg-final { scan-assembler-times "rep mov" 1 } } */
++
++__attribute__((target_clones("default","arch=icelake-server")))
++void
++foo (char *a, char *b, int size)
++{
++ __builtin_memcpy (a, b, size & 0x7F);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/pr110108-2.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx2 -O2 -funsigned-char" } */
++/* { dg-final { scan-assembler-times "vpblendvb" 2 } } */
++
++#include <immintrin.h>
++__m128i do_stuff_128(__m128i X0, __m128i X1, __m128i X2) {
++ __m128i Result = _mm_blendv_epi8(X0, X1, X2);
++ return Result;
++}
++
++__m256i do_stuff_256(__m256i X0, __m256i X1, __m256i X2) {
++ __m256i Result = _mm256_blendv_epi8(X0, X1, X2);
++ return Result;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/pr110206.c
+@@ -0,0 +1,39 @@
++/* PR target/110206 */
++/* { dg-do run } */
++/* { dg-options "-Os -mavx512bw -mavx512vl" } */
++/* { dg-require-effective-target avx512bw } */
++/* { dg-require-effective-target avx512vl } */
++
++#define AVX512BW
++#define AVX512VL
++
++#include "avx512f-check.h"
++
++typedef unsigned char __attribute__((__vector_size__ (4))) U;
++typedef unsigned char __attribute__((__vector_size__ (8))) V;
++typedef unsigned short u16;
++
++V g;
++
++void
++__attribute__((noinline))
++foo (U u, u16 c, V *r)
++{
++ if (!c)
++ abort ();
++ V x = __builtin_shufflevector (u, (204 >> u), 7, 0, 5, 1, 3, 5, 0, 2);
++ V y = __builtin_shufflevector (g, (V) { }, 7, 6, 6, 7, 2, 6, 3, 5);
++ V z = __builtin_shufflevector (y, 204 * x, 3, 9, 8, 1, 4, 6, 14, 5);
++ *r = z;
++}
++
++static void test_256 (void) { };
++
++static void
++test_128 (void)
++{
++ V r;
++ foo ((U){4}, 5, &r);
++ if (r[6] != 0x30)
++ abort();
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/pr110309.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 --param vect-partial-vector-usage=1 -march=znver4 -mprefer-vector-width=256" } */
++/* { dg-final { scan-assembler-not {(?n)vpblendd.*ymm} } } */
++
++
++void foo (int * __restrict a, int *b)
++{
++ for (int i = 0; i < 6; ++i)
++ a[i] = b[i] + 42;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/pr111306.c
+@@ -0,0 +1,36 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
++/* { dg-require-effective-target avx512fp16 } */
++
++#define AVX512FP16
++#include "avx512f-helper.h"
++
++__attribute__((optimize("O2"),noipa))
++void func1(_Float16 *a, _Float16 *b, int n, _Float16 *c) {
++ __m512h rA = _mm512_loadu_ph(a);
++ for (int i = 0; i < n; i += 32) {
++ __m512h rB = _mm512_loadu_ph(b + i);
++ _mm512_storeu_ph(c + i, _mm512_fcmul_pch(rB, rA));
++ }
++}
++
++void
++test_512 (void)
++{
++ int n = 32;
++ _Float16 a[n], b[n], c[n];
++ _Float16 exp[n];
++ for (int i = 1; i <= n; i++) {
++ a[i - 1] = i & 1 ? -i : i;
++ b[i - 1] = i;
++ }
++
++ func1(a, b, n, c);
++ for (int i = 0; i < n / 32; i += 2) {
++ if (c[i] != a[i] * b[i] + a[i+1] * b[i+1]
++ || c[i+1] != a[i] * b[i+1] - a[i+1]*b[i])
++ __builtin_abort ();
++ }
++}
++
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/i386/pr111340.c
+@@ -0,0 +1,9 @@
++/* PR target/111340 */
++/* { dg-do compile { target { fpic && int128 } } } */
++/* { dg-options "-O2 -fpic" } */
++
++void
++bar (void)
++{
++ __asm ("# %0" : : "g" ((((unsigned __int128) 0x123456789abcdef0ULL) << 64) | 0x0fedcba987654321ULL));
++}
+--- a/src/gcc/testsuite/gcc.target/i386/pr88531-1b.c
++++ b/src/gcc/testsuite/gcc.target/i386/pr88531-1b.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -march=skylake -mfpmath=sse" } */
++/* { dg-options "-O3 -march=skylake -mfpmath=sse -mtune=haswell" } */
+
+ #include "pr88531-1a.c"
+
+--- a/src/gcc/testsuite/gcc.target/i386/pr88531-1c.c
++++ b/src/gcc/testsuite/gcc.target/i386/pr88531-1c.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -march=skylake-avx512 -mfpmath=sse" } */
++/* { dg-options "-O3 -march=skylake-avx512 -mfpmath=sse -mtune=haswell" } */
+
+ #include "pr88531-1a.c"
+
+--- a/src/gcc/testsuite/gcc.target/powerpc/clone1.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/clone1.c
+@@ -21,6 +21,7 @@ long mod_func_or (long a, long b, long c)
+ return mod_func (a, b) | c;
+ }
+
+-/* { dg-final { scan-assembler-times {\mdivd\M} 1 } } */
+-/* { dg-final { scan-assembler-times {\mmulld\M} 1 } } */
+-/* { dg-final { scan-assembler-times {\mmodsd\M} 1 } } */
++/* { Fail due to RS6000_DISABLE_SCALAR_MODULO. */
++/* { dg-final { scan-assembler-times {\mdivd\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmulld\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmodsd\M} 1 { xfail *-*-* } } } */
+--- a/src/gcc/testsuite/gcc.target/powerpc/clone3.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/clone3.c
+@@ -27,7 +27,8 @@ long mod_func_or (long a, long b, long c)
+ return mod_func (a, b) | c;
+ }
+
+-/* { dg-final { scan-assembler-times {\mdivd\M} 1 } } */
+-/* { dg-final { scan-assembler-times {\mmulld\M} 1 } } */
+-/* { dg-final { scan-assembler-times {\mmodsd\M} 2 } } */
++/* { Fail due to RS6000_DISABLE_SCALAR_MODULO. */
++/* { dg-final { scan-assembler-times {\mdivd\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmulld\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmodsd\M} 2 { xfail *-*-* } } } */
+ /* { dg-final { scan-assembler-times {\mpld\M} 1 } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/darwin-abi-13-0.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile { target powerpc*-*-darwin* } } */
++/* { dg-require-effective-target ilp32 } */
++/* { dg-options "-Wno-long-long" } */
++
++#include "darwin-structs-0.h"
++
++int tcd[sizeof(cd) != 12 ? -1 : 1];
++int acd[__alignof__(cd) != 4 ? -1 : 1];
++
++int sdc[sizeof(dc) != 16 ? -1 : 1];
++int adc[__alignof__(dc) != 8 ? -1 : 1];
++
++int scL[sizeof(cL) != 12 ? -1 : 1];
++int acL[__alignof__(cL) != 4 ? -1 : 1];
++
++int sLc[sizeof(Lc) != 16 ? -1 : 1];
++int aLc[__alignof__(Lc) != 8 ? -1 : 1];
++
++int scD[sizeof(cD) != 32 ? -1 : 1];
++int acD[__alignof__(cD) != 16 ? -1 : 1];
++
++int sDc[sizeof(Dc) != 32 ? -1 : 1];
++int aDc[__alignof__(Dc) != 16 ? -1 : 1];
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/darwin-abi-13-1.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile { target powerpc*-*-darwin* } } */
++/* { dg-require-effective-target ilp32 } */
++/* { dg-options "-Wno-long-long" } */
++
++#pragma pack(push, 1)
++
++#include "darwin-structs-0.h"
++
++int tcd[sizeof(cd) != 9 ? -1 : 1];
++int acd[__alignof__(cd) != 1 ? -1 : 1];
++
++int sdc[sizeof(dc) != 9 ? -1 : 1];
++int adc[__alignof__(dc) != 1 ? -1 : 1];
++
++int scL[sizeof(cL) != 9 ? -1 : 1];
++int acL[__alignof__(cL) != 1 ? -1 : 1];
++
++int sLc[sizeof(Lc) != 9 ? -1 : 1];
++int aLc[__alignof__(Lc) != 1 ? -1 : 1];
++
++int scD[sizeof(cD) != 17 ? -1 : 1];
++int acD[__alignof__(cD) != 1 ? -1 : 1];
++
++int sDc[sizeof(Dc) != 17 ? -1 : 1];
++int aDc[__alignof__(Dc) != 1 ? -1 : 1];
++
++#pragma pack(pop)
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/darwin-abi-13-2.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile { target powerpc*-*-darwin* } } */
++/* { dg-require-effective-target ilp32 } */
++/* { dg-options "-Wno-long-long" } */
++
++#pragma pack(push, 2)
++
++#include "darwin-structs-0.h"
++
++int tcd[sizeof(cd) != 10 ? -1 : 1];
++int acd[__alignof__(cd) != 2 ? -1 : 1];
++
++int sdc[sizeof(dc) != 10 ? -1 : 1];
++int adc[__alignof__(dc) != 2 ? -1 : 1];
++
++int scL[sizeof(cL) != 10 ? -1 : 1];
++int acL[__alignof__(cL) != 2 ? -1 : 1];
++
++int sLc[sizeof(Lc) != 10 ? -1 : 1];
++int aLc[__alignof__(Lc) != 2 ? -1 : 1];
++
++int scD[sizeof(cD) != 18 ? -1 : 1];
++int acD[__alignof__(cD) != 2 ? -1 : 1];
++
++int sDc[sizeof(Dc) != 18 ? -1 : 1];
++int aDc[__alignof__(Dc) != 2 ? -1 : 1];
++
++#pragma pack(pop)
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/darwin-structs-0.h
+@@ -0,0 +1,29 @@
++typedef struct _cd {
++ char c;
++ double d;
++} cd;
++
++typedef struct _dc {
++ double d;
++ char c;
++} dc;
++
++typedef struct _cL {
++ char c;
++ long long L;
++} cL;
++
++typedef struct _Lc {
++ long long L;
++ char c;
++} Lc;
++
++typedef struct _cD {
++ char c;
++ long double D;
++} cD;
++
++typedef struct _Dc {
++ long double D;
++ char c;
++} Dc;
+--- a/src/gcc/testsuite/gcc.target/powerpc/fusion-p10-ldcmpi.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/fusion-p10-ldcmpi.c
+@@ -54,15 +54,17 @@ TEST(uint8_t)
+ TEST(int8_t)
+
+ /* { dg-final { scan-assembler-times "lbz_cmpldi_cr0_QI_clobber_CCUNS_zero" 4 { target lp64 } } } */
+-/* { dg-final { scan-assembler-times "ld_cmpdi_cr0_DI_DI_CC_none" 4 { target lp64 } } } */
+-/* { dg-final { scan-assembler-times "ld_cmpdi_cr0_DI_clobber_CC_none" 4 { target lp64 } } } */
+-/* { dg-final { scan-assembler-times "ld_cmpldi_cr0_DI_DI_CCUNS_none" 1 { target lp64 } } } */
+-/* { dg-final { scan-assembler-times "ld_cmpldi_cr0_DI_clobber_CCUNS_none" 1 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "ld_cmpdi_cr0_DI_DI_CC_none" 24 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "ld_cmpdi_cr0_DI_clobber_CC_none" 8 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "ld_cmpldi_cr0_DI_DI_CCUNS_none" 2 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "ld_cmpldi_cr0_DI_clobber_CCUNS_none" 2 { target lp64 } } } */
+ /* { dg-final { scan-assembler-times "lha_cmpdi_cr0_HI_clobber_CC_sign" 16 { target lp64 } } } */
+ /* { dg-final { scan-assembler-times "lhz_cmpldi_cr0_HI_clobber_CCUNS_zero" 4 { target lp64 } } } */
+ /* { dg-final { scan-assembler-times "lwa_cmpdi_cr0_SI_EXTSI_CC_sign" 0 { target lp64 } } } */
+-/* { dg-final { scan-assembler-times "lwa_cmpdi_cr0_SI_clobber_CC_none" 4 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpwi_cr0_SI_clobber_CC_none" 8 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpwi_cr0_SI_SI_CC_none" 8 { target lp64 } } } */
+ /* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_EXTSI_CCUNS_zero" 0 { target lp64 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_SI_CCUNS_none" 2 { target lp64 } } } */
+ /* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_clobber_CCUNS_none" 2 { target lp64 } } } */
+
+ /* { dg-final { scan-assembler-times "lbz_cmpldi_cr0_QI_clobber_CCUNS_zero" 2 { target ilp32 } } } */
+@@ -73,6 +75,8 @@ TEST(int8_t)
+ /* { dg-final { scan-assembler-times "lha_cmpdi_cr0_HI_clobber_CC_sign" 8 { target ilp32 } } } */
+ /* { dg-final { scan-assembler-times "lhz_cmpldi_cr0_HI_clobber_CCUNS_zero" 2 { target ilp32 } } } */
+ /* { dg-final { scan-assembler-times "lwa_cmpdi_cr0_SI_EXTSI_CC_sign" 0 { target ilp32 } } } */
+-/* { dg-final { scan-assembler-times "lwa_cmpdi_cr0_SI_clobber_CC_none" 9 { target ilp32 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpwi_cr0_SI_SI_CC_none" 36 { target ilp32 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpwi_cr0_SI_clobber_CC_none" 16 { target ilp32 } } } */
+ /* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_EXTSI_CCUNS_zero" 0 { target ilp32 } } } */
+ /* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_clobber_CCUNS_none" 6 { target ilp32 } } } */
++/* { dg-final { scan-assembler-times "lwz_cmpldi_cr0_SI_SI_CCUNS_none" 2 { target ilp32 } } } */
+--- a/src/gcc/testsuite/gcc.target/powerpc/mod-1.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/mod-1.c
+@@ -7,13 +7,14 @@ long lsmod (long a, long b) { return a%b; }
+ unsigned int iumod (unsigned int a, unsigned int b) { return a%b; }
+ unsigned long lumod (unsigned long a, unsigned long b) { return a%b; }
+
+-/* { dg-final { scan-assembler-times "modsw " 1 } } */
+-/* { dg-final { scan-assembler-times "modsd " 1 } } */
+-/* { dg-final { scan-assembler-times "moduw " 1 } } */
+-/* { dg-final { scan-assembler-times "modud " 1 } } */
+-/* { dg-final { scan-assembler-not "mullw " } } */
+-/* { dg-final { scan-assembler-not "mulld " } } */
+-/* { dg-final { scan-assembler-not "divw " } } */
+-/* { dg-final { scan-assembler-not "divd " } } */
+-/* { dg-final { scan-assembler-not "divwu " } } */
+-/* { dg-final { scan-assembler-not "divdu " } } */
++/* Fail due to RS6000_DISABLE_SCALAR_MODULO. */
++/* { dg-final { scan-assembler-times {\mmodsw\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmodsd\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmoduw\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmodud\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mmullw\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mmulld\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivw\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivd\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivwu\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivdu\M} { xfail *-*-* } } } */
+--- a/src/gcc/testsuite/gcc.target/powerpc/mod-2.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/mod-2.c
+@@ -5,8 +5,9 @@
+ int ismod (int a, int b) { return a%b; }
+ unsigned int iumod (unsigned int a, unsigned int b) { return a%b; }
+
+-/* { dg-final { scan-assembler-times "modsw " 1 } } */
+-/* { dg-final { scan-assembler-times "moduw " 1 } } */
+-/* { dg-final { scan-assembler-not "mullw " } } */
+-/* { dg-final { scan-assembler-not "divw " } } */
+-/* { dg-final { scan-assembler-not "divwu " } } */
++/* Fail due to RS6000_DISABLE_SCALAR_MODULO. */
++/* { dg-final { scan-assembler-times {\mmodsw\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-times {\mmoduw\M} 1 { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mmullw\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivw\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler-not {\mdivwu\M} { xfail *-*-* } } } */
+--- a/src/gcc/testsuite/gcc.target/powerpc/p10-vdivq-vmodq.c
++++ b/src/gcc/testsuite/gcc.target/powerpc/p10-vdivq-vmodq.c
+@@ -23,5 +23,6 @@ __int128 s_mod(__int128 a, __int128 b)
+
+ /* { dg-final { scan-assembler {\mvdivsq\M} } } */
+ /* { dg-final { scan-assembler {\mvdivuq\M} } } */
+-/* { dg-final { scan-assembler {\mvmodsq\M} } } */
+-/* { dg-final { scan-assembler {\mvmoduq\M} } } */
++/* Fail due to RS6000_DISABLE_SCALAR_MODULO. */
++/* { dg-final { scan-assembler {\mvmodsq\M} { xfail *-*-* } } } */
++/* { dg-final { scan-assembler {\mvmoduq\M} { xfail *-*-* } } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109069-1.c
+@@ -0,0 +1,25 @@
++/* { dg-do run } */
++/* { dg-require-effective-target vmx_hw } */
++/* { dg-options "-O2 -maltivec" } */
++
++/* Verify it runs successfully. */
++
++#include <altivec.h>
++
++__attribute__ ((noipa))
++vector signed int
++test ()
++{
++ vector signed int v = {-16, -16, -16, -16};
++ vector signed int res = vec_sld (v, v, 3);
++ return res;
++}
++
++int
++main ()
++{
++ vector signed int res = test ();
++ if (res[0] != 0xf0ffffff)
++ __builtin_abort ();
++ return 0;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109069-2-run.c
+@@ -0,0 +1,50 @@
++/* { dg-do run } */
++/* { dg-require-effective-target vsx_hw } */
++/* { dg-options "-O2 -mvsx" } */
++
++/* Verify it doesn't generate wrong code. */
++
++#include "pr109069-2.h"
++
++int
++main ()
++{
++ vector unsigned char res1 = test1 ();
++ for (int i = 0; i < 16; i++)
++ if (res1[i] != 0xd)
++ __builtin_abort ();
++
++ vector signed short res2 = test2 ();
++ for (int i = 0; i < 8; i++)
++ if (res2[i] != 0x7777)
++ __builtin_abort ();
++
++ vector signed int res3 = test3 ();
++ vector unsigned int res4 = test4 ();
++ vector float res6 = test6 ();
++ for (int i = 0; i < 4; i++)
++ {
++ if (res3[i] != 0xbbbbbbbb)
++ __builtin_abort ();
++ if (res4[i] != 0x7070707)
++ __builtin_abort ();
++ U32b u;
++ u.f = res6[i];
++ if (u.i != 0x17171717)
++ __builtin_abort ();
++ }
++
++ vector unsigned long long res5 = test5 ();
++ vector double res7 = test7 ();
++ for (int i = 0; i < 2; i++)
++ {
++ if (res5[i] != 0x4545454545454545ll)
++ __builtin_abort ();
++ U64b u;
++ u.f = res7[i];
++ if (u.i != 0x5454545454545454ll)
++ __builtin_abort ();
++ }
++ return 0;
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109069-2.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* Disable rs6000 optimize_swaps as it drops some REG_EQUAL
++ notes on const vector and affects test point here. */
++/* { dg-options "-O2 -mvsx -mno-optimize-swaps" } */
++
++/* Verify we can optimize away vector shifting if every byte
++ of vector is the same. */
++
++#include "pr109069-2.h"
++
++/* { dg-final { scan-assembler-not {\mvsldoi\M} } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109069-2.h
+@@ -0,0 +1,83 @@
++#include <altivec.h>
++
++typedef union
++{
++ unsigned int i;
++ float f;
++} U32b;
++
++typedef union
++{
++ unsigned long long i;
++ double f;
++} U64b;
++
++__attribute__ ((noipa))
++vector unsigned char
++test1 ()
++{
++ vector unsigned char v = {0xd, 0xd, 0xd, 0xd, 0xd, 0xd, 0xd, 0xd,
++ 0xd, 0xd, 0xd, 0xd, 0xd, 0xd, 0xd, 0xd};
++ vector unsigned char res = vec_sld (v, v, 3);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector signed short
++test2 ()
++{
++ vector signed short v
++ = {0x7777, 0x7777, 0x7777, 0x7777, 0x7777, 0x7777, 0x7777, 0x7777};
++ vector signed short res = vec_sld (v, v, 5);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector signed int
++test3 ()
++{
++ vector signed int v = {0xbbbbbbbb, 0xbbbbbbbb, 0xbbbbbbbb, 0xbbbbbbbb};
++ vector signed int res = vec_sld (v, v, 7);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector unsigned int
++test4 ()
++{
++ vector unsigned int v = {0x07070707, 0x07070707, 0x07070707, 0x07070707};
++ vector unsigned int res = vec_sld (v, v, 9);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector unsigned long long
++test5 ()
++{
++ vector unsigned long long v = {0x4545454545454545ll, 0x4545454545454545ll};
++ vector unsigned long long res = vec_sld (v, v, 10);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector float
++test6 ()
++{
++ U32b u;
++ u.i = 0x17171717;
++ vector float vf = {u.f, u.f, u.f, u.f};
++ vector float res = vec_sld (vf, vf, 11);
++ return res;
++}
++
++__attribute__ ((noipa))
++vector double
++test7 ()
++{
++ U64b u;
++ u.i = 0x5454545454545454ll;
++ vector double vf = {u.f, u.f};
++ vector double res = vec_sld (vf, vf, 13);
++ return res;
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109932-1.c
+@@ -0,0 +1,17 @@
++/* { dg-require-effective-target int128 } */
++/* { dg-require-effective-target powerpc_altivec_ok } */
++/* { dg-options "-maltivec -mno-vsx" } */
++
++/* Verify there is no ICE but one expected error message instead. */
++
++#include <altivec.h>
++
++extern vector signed __int128 res_vslll;
++extern unsigned long long aull[2];
++
++void
++testVectorInt128Pack ()
++{
++ res_vslll = __builtin_pack_vector_int128 (aull[0], aull[1]); /* { dg-error "'__builtin_pack_vector_int128' requires the '-mvsx' option" } */
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr109932-2.c
+@@ -0,0 +1,17 @@
++/* { dg-require-effective-target int128 } */
++/* { dg-require-effective-target powerpc_altivec_ok } */
++/* { dg-options "-maltivec -mno-vsx" } */
++
++/* Verify there is no ICE but one expected error message instead. */
++
++#include <altivec.h>
++
++extern vector signed __int128 res_vslll;
++extern unsigned long long aull[2];
++
++void
++testVectorInt128Pack ()
++{
++ res_vslll = __builtin_pack_vector_int128 (aull[0], aull[1]); /* { dg-error "'__builtin_pack_vector_int128' requires the '-mvsx' option" } */
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr110011.c
+@@ -0,0 +1,42 @@
++/* { dg-do run } */
++/* { dg-require-effective-target float128_runtime } */
++/* Force long double to be with IBM format here, to verify
++ _Float128 constant still uses its own format (IEEE) for
++ encoding rather than IBM format. */
++/* { dg-options "-mfp-in-toc -mabi=ibmlongdouble" } */
++/* { dg-add-options float128 } */
++
++#define MPFR_FLOAT128_MAX 0x1.ffffffffffffffffffffffffffffp+16383f128
++
++__attribute__ ((noipa))
++_Float128 f128_max ()
++{
++ return MPFR_FLOAT128_MAX;
++}
++
++typedef union
++{
++ int w[4];
++ _Float128 f128;
++} U;
++
++int main ()
++{
++
++ U umax;
++ umax.f128 = f128_max ();
++ /* ieee float128 max:
++ 7ffeffff ffffffff ffffffff ffffffff. */
++ if (umax.w[1] != 0xffffffff || umax.w[2] != 0xffffffff)
++ __builtin_abort ();
++#ifdef __LITTLE_ENDIAN__
++ if (umax.w[0] != 0xffffffff || umax.w[3] != 0x7ffeffff)
++ __builtin_abort ();
++#else
++ if (umax.w[3] != 0xffffffff || umax.w[0] != 0x7ffeffff)
++ __builtin_abort ();
++#endif
++
++ return 0;
++}
++
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr70243.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target powerpc_vsx_ok } */
++/* { dg-options "-O2 -mvsx" } */
++
++/* PR 70423, Make sure we don't generate vmaddfp or vnmsubfp. These
++ instructions have different rounding modes than the VSX instructions
++ xvmaddsp and xvnmsubsp. These tests are written where the 3 inputs and
++ target are all separate registers. Because vmaddfp and vnmsubfp are no
++ longer generated the compiler will have to generate an xsmaddsp or xsnmsubsp
++ instruction followed by a move operation. */
++
++#include <altivec.h>
++
++vector float
++do_add1 (vector float dummy, vector float a, vector float b, vector float c)
++{
++ return (a * b) + c;
++}
++
++vector float
++do_nsub1 (vector float dummy, vector float a, vector float b, vector float c)
++{
++ return -((a * b) - c);
++}
++
++vector float
++do_add2 (vector float dummy, vector float a, vector float b, vector float c)
++{
++ return vec_madd (a, b, c);
++}
++
++vector float
++do_nsub2 (vector float dummy, vector float a, vector float b, vector float c)
++{
++ return vec_nmsub (a, b, c);
++}
++
++/* { dg-final { scan-assembler {\mxvmadd[am]sp\M} } } */
++/* { dg-final { scan-assembler {\mxvnmsub[am]sp\M} } } */
++/* { dg-final { scan-assembler-not {\mvmaddfp\M} } } */
++/* { dg-final { scan-assembler-not {\mvnmsubfp\M} } } */
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gcc.target/powerpc/pr96762.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mdejagnu-cpu=power10" } */
++
++/* Verify there is no ICE on ilp32 env. */
++
++extern void foo (char *);
++
++void
++bar (void)
++{
++ char zj[] = "XXXXXXXXXXXXXXXX";
++ foo (zj);
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr108842.d
+@@ -0,0 +1,4 @@
++// { dg-do compile }
++// { dg-options "-fno-rtti" }
++module object;
++enum int[] x = [0, 1, 2];
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110359.d
+@@ -0,0 +1,22 @@
++// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110359
++// { dg-do compile }
++// { dg-options "-fdump-tree-original" }
++double pow(in double x, in ulong p)
++{
++ import gcc.builtins : __builtin_expect;
++ if (__builtin_expect(p == 0, false))
++ return 1;
++ if (__builtin_expect(p == 1, false))
++ return x;
++
++ double s = x;
++ double v = 1;
++ for (ulong i = p; i > 1; i >>= 1)
++ {
++ v = (i & 0x1) ? s * v : v;
++ s = s * s;
++ }
++ return v * s;
++}
++// { dg-final { scan-tree-dump "if \\(__builtin_expect \\(p == 0, 0\\) != 0\\)" "original" } }
++// { dg-final { scan-tree-dump "if \\(__builtin_expect \\(p == 1, 0\\) != 0\\)" "original" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110514a.d
+@@ -0,0 +1,9 @@
++// { dg-do "compile" }
++// { dg-options "-O -fdump-tree-optimized" }
++immutable uint[] imm_arr = [1,2,3];
++int test_imm(immutable uint[] ptr)
++{
++ return imm_arr[2] == 3 ? 123 : 456;
++}
++// { dg-final { scan-assembler-not "_d_arraybounds_indexp" } }
++// { dg-final { scan-tree-dump "return 123;" optimized } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110514b.d
+@@ -0,0 +1,8 @@
++// { dg-do "compile" }
++// { dg-options "-O" }
++immutable uint[] imm_ctor_arr;
++int test_imm_ctor(immutable uint[] ptr)
++{
++ return imm_ctor_arr[2] == 3;
++}
++// { dg-final { scan-assembler "_d_arraybounds_indexp" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110514c.d
+@@ -0,0 +1,8 @@
++// { dg-do "compile" }
++// { dg-options "-O" }
++const uint[] cst_arr = [1,2,3];
++int test_cst(const uint[] ptr)
++{
++ return cst_arr[2] == 3;
++}
++// { dg-final { scan-assembler "_d_arraybounds_indexp" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110514d.d
+@@ -0,0 +1,8 @@
++// { dg-do "compile" }
++// { dg-options "-O" }
++const uint[] cst_ctor_arr;
++int test_cst_ctor(const uint[] ptr)
++{
++ return cst_ctor_arr[2] == 3;
++}
++// { dg-final { scan-assembler "_d_arraybounds_indexp" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/pr110959.d
+@@ -0,0 +1,32 @@
++// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110959
++// { dg-do compile }
++class ArsdExceptionBase : object.Exception {
++ this(string operation, string file = __FILE__, size_t line = __LINE__, Throwable next = null) {
++ super(operation, file, line, next);
++ }
++}
++
++template ArsdException(alias Type, DataTuple...) {
++ static if(DataTuple.length)
++ alias Parent = ArsdException!(Type, DataTuple[0 .. $-1]);
++ else
++ alias Parent = ArsdExceptionBase;
++
++ class ArsdException : Parent {
++ DataTuple data;
++
++ this(DataTuple data, string file = __FILE__, size_t line = __LINE__) {
++ this.data = data;
++ static if(is(Parent == ArsdExceptionBase))
++ super(null, file, line);
++ else
++ super(data[0 .. $-1], file, line);
++ }
++
++ static opCall(R...)(R r, string file = __FILE__, size_t line = __LINE__) {
++ return new ArsdException!(Type, DataTuple, R)(r, file, line);
++ }
++ }
++}
++
++__gshared pr110959 = ArsdException!"Test"(4, "four");
+--- a/src/gcc/testsuite/gdc.dg/pr98277.d
++++ b/src/gcc/testsuite/gdc.dg/pr98277.d
+@@ -11,3 +11,14 @@ ref int getSide(Side side, return ref int left, return ref int right)
+ {
+ return side == Side.left ? left : right;
+ }
++
++enum SideA : int[]
++{
++ left = [0],
++ right = [1],
++}
++
++int getSideA(SideA side, ref int left, ref int right)
++{
++ return side == SideA.left ? left : right;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/torture/pr110516a.d
+@@ -0,0 +1,12 @@
++// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110516
++// { dg-do compile }
++// { dg-options "-fno-moduleinfo -fdump-tree-optimized" }
++void fn110516(ubyte* ptr)
++{
++ import core.volatile : volatileLoad;
++ volatileLoad(ptr);
++ volatileLoad(ptr);
++ volatileLoad(ptr);
++ volatileLoad(ptr);
++}
++// { dg-final { scan-tree-dump-times " ={v} " 4 "optimized" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.dg/torture/pr110516b.d
+@@ -0,0 +1,12 @@
++// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110516
++// { dg-do compile }
++// { dg-options "-fno-moduleinfo -fdump-tree-optimized" }
++void fn110516(ubyte* ptr)
++{
++ import core.volatile : volatileStore;
++ volatileStore(ptr, 0);
++ volatileStore(ptr, 0);
++ volatileStore(ptr, 0);
++ volatileStore(ptr, 0);
++}
++// { dg-final { scan-tree-dump-times " ={v} " 4 "optimized" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.test/compilable/test23978.d
+@@ -0,0 +1,30 @@
++// REQUIRED_ARGS: -preview=dip1021 -lowmem
++// https://issues.dlang.org/show_bug.cgi?id=23978
++
++// Note: this is a memory corruption bug.
++// Memory returned by `GC.realloc` retains references to old memory in it,
++// mostly because of the smallarray optimization for `Array(T)`.
++// If this fails again, it might not be consistent, so try running it multiple times.
++
++class LUBench { }
++void lup(ulong , ulong , int , int = 1)
++{
++ new LUBench;
++}
++void lup_3200(ulong iters, ulong flops)
++{
++ lup(iters, flops, 3200);
++}
++void raytrace()
++{
++ struct V
++ {
++ float x, y, z;
++ auto normalize() { }
++ struct Tid { }
++ auto spawnLinked() { }
++ string[] namesByTid;
++ class MessageBox { }
++ auto cross() { }
++ }
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gdc.test/runnable/test23010.d
+@@ -0,0 +1,43 @@
++// https://issues.dlang.org/show_bug.cgi?id=23010
++
++alias AliasSeq(T...) = T;
++
++mixin template faz() {
++ alias T = AliasSeq!(int);
++ T bar = 12345;
++
++ void write1() {
++ assert(bar[0] == 12345);
++ }
++
++ AliasSeq!(string, float) foo = AliasSeq!("qwerty", 1.25f);
++
++ void write2() {
++ assert(foo == AliasSeq!("qwerty", 1.25f));
++ foo = AliasSeq!("asdfg", 2.5f); // this even crashed before
++ assert(foo == AliasSeq!("asdfg", 2.5f));
++ }
++}
++
++void main() {
++ mixin faz!();
++ write1;
++ write2;
++ fun;
++}
++
++// Testing static symbol generation ('toobj.d' changes)
++
++static AliasSeq!(int, string) tup;
++
++void fun()
++{
++ auto v = tup;
++
++ struct S(T...) {
++ static T b;
++ }
++
++ alias T = S!(int, float);
++ auto p = T.b;
++}
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/deferred_character_37.f90
+@@ -0,0 +1,88 @@
++! { dg-do run }
++! PR fortran/95947
++! PR fortran/110658
++!
++! Test deferred-length character arguments to selected intrinsics
++! that may return a character result of same length as first argument:
++! CSHIFT, EOSHIFT, MAXVAL, MERGE, MINVAL, PACK, SPREAD, TRANSPOSE, UNPACK
++
++program p
++ implicit none
++ call pr95947 ()
++ call pr110658 ()
++ call s ()
++
++contains
++
++ subroutine pr95947
++ character(len=:), allocatable :: m(:)
++
++ m = [ character(len=10) :: 'ape','bat','cat','dog','eel','fly','gnu']
++ m = pack (m, mask=(m(:)(2:2) == 'a'))
++
++! print *, "m = '", m,"' ", "; expected is ['bat','cat']"
++ if (.not. all (m == ['bat','cat'])) stop 1
++
++! print *, "size(m) = ", size(m), "; expected is 2"
++ if (size (m) /= 2) stop 2
++
++! print *, "len(m) = ", len(m), "; expected is 10"
++ if (len (m) /= 10) stop 3
++
++! print *, "len_trim(m) = ", len_trim(m), "; expected is 3 3"
++ if (.not. all (len_trim(m) == [3,3])) stop 4
++ end
++
++ subroutine pr110658
++ character(len=:), allocatable :: array(:), array2(:,:)
++ character(len=:), allocatable :: res, res1(:), res2(:)
++
++ array = ["bb", "aa", "cc"]
++
++ res = minval (array)
++ if (res /= "aa") stop 11
++
++ res = maxval (array, mask=[.true.,.true.,.false.])
++ if (res /= "bb") stop 12
++
++ res1 = cshift (array, 1)
++ if (any (res1 /= ["aa","cc","bb"])) stop 13
++
++ res2 = eoshift (res1, -1)
++ if (any (res2 /= [" ", "aa", "cc"])) stop 14
++
++ res2 = pack (array, mask=[.true.,.false.,.true.])
++ if (any (res2 /= ["bb","cc"])) stop 15
++
++ res2 = unpack (res2, mask=[.true.,.false.,.true.], field="aa")
++ if (any (res2 /= array)) stop 16
++
++ res2 = merge (res2, array, [.true.,.false.,.true.])
++ if (any (res2 /= array)) stop 17
++
++ array2 = spread (array, dim=2, ncopies=2)
++ array2 = transpose (array2)
++ if (any (shape (array2) /= [2,3])) stop 18
++ if (any (array2(2,:) /= array)) stop 19
++ end
++
++ subroutine s
++ character(:), allocatable :: array1(:), array2(:)
++ array1 = ["aa","cc","bb"]
++ array2 = copy (array1)
++ if (any (array1 /= array2)) stop 20
++ end
++
++ function copy (arg) result (res)
++ character(:), allocatable :: res(:)
++ character(*), intent(in) :: arg(:)
++ integer :: i, k, n
++ k = len (arg)
++ n = size (arg)
++ allocate (character(k) :: res(n))
++ do i = 1, n
++ res(i) = arg(i)
++ end do
++ end
++
++end
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/findloc_10.f90
+@@ -0,0 +1,13 @@
++! { dg-do run }
++! { dg-options "-fdump-tree-original" }
++! PR fortran/110288 - FINDLOC and deferred-length character arguments
++
++program test
++ character(len=:), allocatable :: array(:)
++ character(len=:), allocatable :: value
++ array = ["bb", "aa"]
++ value = "aa"
++ if (findloc (array, value, dim=1) /= 2) stop 1
++end program test
++
++! { dg-final { scan-tree-dump "_gfortran_findloc2_s1 \\(.*, \\.array, \\.value\\)" "original" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/findloc_9.f90
+@@ -0,0 +1,19 @@
++! { dg-do compile }
++! { dg-options "-fdump-tree-original" }
++! PR fortran/110585 - simplification of FINDLOC for constant complex arguments
++
++program mvce
++ implicit none
++ integer, parameter :: a(*) = findloc([(1.,0.),(2.,1.)], (2.,0.))
++ integer, parameter :: b(*) = findloc([(1.,0.),(2.,1.)], (2.,0.), back=.true.)
++ integer, parameter :: c(*) = findloc([(1.,0.),(2.,1.)], (2.,1.))
++ integer, parameter :: d(*) = findloc([(1.,0.),(2.,1.)], (2.,1.), back=.true.)
++ integer, parameter :: e = findloc([(1.,0.),(2.,1.)], (2.,1.), dim=1)
++ if (a(1) /= 0) stop 1
++ if (b(1) /= 0) stop 2
++ if (c(1) /= 2) stop 3
++ if (d(1) /= 2) stop 4
++ if (e /= 2) stop 5
++end
++
++! { dg-final { scan-tree-dump-not "_gfortran_stop_numeric" "original" } }
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/pr107397.f90
+@@ -0,0 +1,9 @@
++!{ dg-do compile }
++!
++program p
++ type t
++ real :: a = 1.0
++ end type
++ type(t), parameter :: x = z'1' ! { dg-error "incompatible with a BOZ" }
++ x%a = x%a + 2 ! { dg-error "has no IMPLICIT type" }
++end
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/ptr-func-5.f90
+@@ -0,0 +1,39 @@
++! { dg-do compile }
++! PR fortran/109846
++! CLASS pointer function result in variable definition context
++
++module foo
++ implicit none
++ type :: parameter_list
++ contains
++ procedure :: sublist, sublist_nores
++ end type
++contains
++ function sublist (this) result (slist)
++ class(parameter_list), intent(inout) :: this
++ class(parameter_list), pointer :: slist
++ allocate (slist)
++ end function
++ function sublist_nores (this)
++ class(parameter_list), intent(inout) :: this
++ class(parameter_list), pointer :: sublist_nores
++ allocate (sublist_nores)
++ end function
++end module
++
++program example
++ use foo
++ implicit none
++ type(parameter_list) :: plist
++ call sub1 (plist%sublist())
++ call sub1 (plist%sublist_nores())
++ call sub2 (plist%sublist())
++ call sub2 (plist%sublist_nores())
++contains
++ subroutine sub1 (plist)
++ type(parameter_list), intent(inout) :: plist
++ end subroutine
++ subroutine sub2 (plist)
++ type(parameter_list) :: plist
++ end subroutine
++end program
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gfortran.dg/select_rank_6.f90
+@@ -0,0 +1,48 @@
++! { dg-do compile }
++! PR fortran/100607 - fix diagnostics for SELECT RANK
++! Contributed by T.Burnus
++
++program p
++ implicit none
++ integer, allocatable :: A(:,:,:)
++
++ allocate(a(5:6,-2:2, 99:100))
++ call foo(a)
++ call bar(a)
++
++contains
++
++ subroutine foo(x)
++ integer, allocatable :: x(..)
++ if (rank(x) /= 3) stop 1
++ if (any (lbound(x) /= [5, -2, 99])) stop 2
++
++ select rank (x)
++ rank(3)
++ if (any (lbound(x) /= [5, -2, 99])) stop 3
++ end select
++
++ select rank (x) ! { dg-error "pointer or allocatable selector at .2." }
++ rank(*) ! { dg-error "pointer or allocatable selector at .2." }
++ if (rank(x) /= 1) stop 4
++ if (lbound(x, 1) /= 1) stop 5
++ end select
++ end
++
++ subroutine bar(x)
++ integer :: x(..)
++ if (rank(x) /= 3) stop 6
++ if (any (lbound(x) /= 1)) stop 7
++
++ select rank (x)
++ rank(3)
++ if (any (lbound(x) /= 1)) stop 8
++ end select
++
++ select rank (x)
++ rank(*)
++ if (rank(x) /= 1) stop 9
++ if (lbound(x, 1) /= 1) stop 10
++ end select
++ end
++end
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gnat.dg/opt102.adb
+@@ -0,0 +1,10 @@
++-- { dg-do run }
++-- { dg-options "-O2 -gnata" }
++
++with Opt102_Pkg; use Opt102_Pkg;
++
++procedure Opt102 is
++ I, F : aliased Integer;
++begin
++ I := Get (Two, F'Access, null);
++end;
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gnat.dg/opt102_pkg.adb
+@@ -0,0 +1,12 @@
++package body Opt102_Pkg is
++
++ function Get (E : Enum; F, M : access Integer) return Integer is
++ begin
++ case E is
++ when One => return 0;
++ when Two => return F.all;
++ when Three => return M.all;
++ end case;
++ end;
++
++end Opt102_Pkg;
+new file mode 100644
+--- /dev/null
++++ b/src/gcc/testsuite/gnat.dg/opt102_pkg.ads
+@@ -0,0 +1,10 @@
++package Opt102_Pkg is
++
++ type Enum is (One, Two, Three);
++
++ function Get (E : Enum; F, M : access Integer) return Integer
++ with Pre => (E = One) = (F = null and M = null) and
++ (E = Two) = (F /= null) and
++ (E = Three) = (M /= null);
++
++end Opt102_Pkg;
+--- a/src/gcc/testsuite/lib/target-supports.exp
++++ b/src/gcc/testsuite/lib/target-supports.exp
+@@ -10597,7 +10597,7 @@ proc check_effective_target_aarch64_tiny { } {
+ # various architecture extensions via the .arch_extension pseudo-op.
+
+ foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
+- "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" } {
++ "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" "ls64" } {
+ eval [string map [list FUNC $aarch64_ext] {
+ proc check_effective_target_aarch64_asm_FUNC_ok { } {
+ if { [istarget aarch64*-*-*] } {
+--- a/src/gcc/tree-ssa-ccp.cc
++++ b/src/gcc/tree-ssa-ccp.cc
+@@ -1552,6 +1552,8 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
+ *mask = wi::lrotate (r1mask, shift, width);
+ *val = wi::lrotate (r1val, shift, width);
+ }
++ *mask = wi::ext (*mask, width, sgn);
++ *val = wi::ext (*val, width, sgn);
+ }
+ }
+ else if (wi::ltu_p (r2val | r2mask, width)
+@@ -1593,8 +1595,8 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
+ /* Accumulate the result. */
+ res_mask |= tmp_mask | (res_val ^ tmp_val);
+ }
+- *val = wi::bit_and_not (res_val, res_mask);
+- *mask = res_mask;
++ *val = wi::ext (wi::bit_and_not (res_val, res_mask), width, sgn);
++ *mask = wi::ext (res_mask, width, sgn);
+ }
+ break;
+
+--- a/src/gcc/tree-ssa-loop-ivcanon.cc
++++ b/src/gcc/tree-ssa-loop-ivcanon.cc
+@@ -1487,15 +1487,16 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
+ }
+ BITMAP_FREE (fathers);
+
++ /* Clean up the information about numbers of iterations, since
++ complete unrolling might have invalidated it. */
++ scev_reset ();
++
+ /* This will take care of removing completely unrolled loops
+ from the loop structures so we can continue unrolling now
+ innermost loops. */
+ if (cleanup_tree_cfg ())
+ update_ssa (TODO_update_ssa_only_virtuals);
+
+- /* Clean up the information about numbers of iterations, since
+- complete unrolling might have invalidated it. */
+- scev_reset ();
+ if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
+ verify_loop_closed_ssa (true);
+ }
+--- a/src/gcc/tree-ssa-strlen.cc
++++ b/src/gcc/tree-ssa-strlen.cc
+@@ -3361,7 +3361,8 @@ strlen_pass::handle_builtin_memcpy (built_in_function bcode)
+ && !integer_zerop (len))
+ {
+ maybe_warn_overflow (stmt, false, len, olddsi, false, true);
+- adjust_last_stmt (olddsi, stmt, false);
++ if (tree_fits_uhwi_p (len))
++ adjust_last_stmt (olddsi, stmt, false);
+ }
+
+ int idx = get_stridx (src, stmt);
+--- a/src/gcc/tree-vect-loop.cc
++++ b/src/gcc/tree-vect-loop.cc
+@@ -2874,7 +2874,7 @@ vect_analyze_loop_1 (class loop *loop, vec_info_shared *shared,
+ res ? "succeeded" : " failed",
+ GET_MODE_NAME (loop_vinfo->vector_mode));
+
+- if (!main_loop_vinfo && suggested_unroll_factor > 1)
++ if (res && !main_loop_vinfo && suggested_unroll_factor > 1)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+--- a/src/gcc/wide-int.h
++++ b/src/gcc/wide-int.h
+@@ -3169,9 +3169,11 @@ wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
+- WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
++ WI_UNARY_RESULT (T1) right
++ = wi::lrshift (width != precision ? wi::zext (x, width) : x,
++ wi::sub (width, ymod));
+ if (width != precision)
+- return wi::zext (left, width) | wi::zext (right, width);
++ return wi::zext (left, width) | right;
+ return left | right;
+ }
+
+@@ -3186,10 +3188,11 @@ wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+- WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
++ WI_UNARY_RESULT (T1) right
++ = wi::lrshift (width != precision ? wi::zext (x, width) : x, ymod);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
+ if (width != precision)
+- return wi::zext (left, width) | wi::zext (right, width);
++ return wi::zext (left, width) | right;
+ return left | right;
+ }
+
+--- a/src/libffi/ChangeLog
++++ b/src/libffi/ChangeLog
+@@ -1,3 +1,11 @@
++2023-05-09 Dan Horák <dan@danny.cz>
++
++ Backported from master:
++ 2023-05-06 Dan Horák <dan@danny.cz>
++
++ PR libffi/109447
++ * src/powerpc/ffi_linux64.c (ffi_prep_args64): Update arg.f128 pointer.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libffi/src/powerpc/ffi_linux64.c
++++ b/src/libffi/src/powerpc/ffi_linux64.c
+@@ -680,7 +680,7 @@ ffi_prep_args64 (extended_cif *ecif, unsigned long *const stack)
+ {
+ if (vecarg_count < NUM_VEC_ARG_REGISTERS64
+ && i < nfixedargs)
+- memcpy (vec_base.f128++, arg.f128, sizeof (float128));
++ memcpy (vec_base.f128++, arg.f128++, sizeof (float128));
+ else
+ memcpy (next_arg.f128, arg.f128++, sizeof (float128));
+ if (++next_arg.f128 == gpr_end.f128)
+--- a/src/libgcc/ChangeLog
++++ b/src/libgcc/ChangeLog
+@@ -1,3 +1,19 @@
++2023-05-21 Iain Sandoe <iain@sandoe.co.uk>
++
++ Backported from master:
++ 2023-05-19 Iain Sandoe <iain@sandoe.co.uk>
++
++ * config.host: Arrange to set min Darwin OS versions from
++ the configured host version.
++ * config/darwin10-unwind-find-enc-func.c: Do not use current
++	headers, but declare the necessary structures locally to the
++ versions in use for Mac OSX 10.6.
++ * config/t-darwin: Amend to handle configured min OS
++ versions.
++ * config/t-darwin-min-1: New.
++ * config/t-darwin-min-5: New.
++ * config/t-darwin-min-8: New.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libgcc/config.host
++++ b/src/libgcc/config.host
+@@ -241,6 +241,24 @@ case ${host} in
+ ;;
+ esac
+ tmake_file="$tmake_file t-slibgcc-darwin"
++ # newer toolsets produce warnings when building for unsupported versions.
++ case ${host} in
++ *-*-darwin1[89]* | *-*-darwin2* )
++ tmake_file="t-darwin-min-8 $tmake_file"
++ ;;
++ *-*-darwin9* | *-*-darwin1[0-7]*)
++ tmake_file="t-darwin-min-5 $tmake_file"
++ ;;
++ *-*-darwin[4-8]*)
++ tmake_file="t-darwin-min-1 $tmake_file"
++ ;;
++ *)
++ # Fall back to configuring for the oldest system known to work with
++ # all archs and the current sources.
++ tmake_file="t-darwin-min-5 $tmake_file"
++ echo "Warning: libgcc configured to support macOS 10.5" 1>&2
++ ;;
++ esac
+ extra_parts="crt3.o libd10-uwfef.a crttms.o crttme.o libemutls_w.a"
+ ;;
+ *-*-dragonfly*)
+--- a/src/libgcc/config/darwin10-unwind-find-enc-func.c
++++ b/src/libgcc/config/darwin10-unwind-find-enc-func.c
+@@ -1,8 +1,34 @@
+-#include "tconfig.h"
+-#include "tsystem.h"
+-#include "unwind-dw2-fde.h"
+ #include "libgcc_tm.h"
+
++/* This shim is special, it needs to be built for Mac OSX 10.6
++ regardless of the current system version.
++ We must also build it to use the unwinder layout that was
++ present for 10.6 (and not update that).
++ So we copy the referenced structures from unwind-dw2-fde.h
++ to avoid pulling in newer system headers and/or changed
++ layouts. */
++struct dwarf_eh_bases
++{
++ void *tbase;
++ void *dbase;
++ void *func;
++};
++
++typedef int sword __attribute__ ((mode (SI)));
++typedef unsigned int uword __attribute__ ((mode (SI)));
++
++/* The first few fields of an FDE. */
++struct dwarf_fde
++{
++ uword length;
++ sword CIE_delta;
++ unsigned char pc_begin[];
++} __attribute__ ((packed, aligned (__alignof__ (void *))));
++
++typedef struct dwarf_fde fde;
++
++extern const fde * _Unwind_Find_FDE (void *, struct dwarf_eh_bases *);
++
+ void *
+ _darwin10_Unwind_FindEnclosingFunction (void *pc)
+ {
+@@ -10,5 +36,5 @@ _darwin10_Unwind_FindEnclosingFunction (void *pc)
+ const struct dwarf_fde *fde = _Unwind_Find_FDE (pc-1, &bases);
+ if (fde)
+ return bases.func;
+- return NULL;
++ return (void *) 0;
+ }
+--- a/src/libgcc/config/t-darwin
++++ b/src/libgcc/config/t-darwin
+@@ -1,15 +1,15 @@
+ # Set this as a minimum (unless overriden by arch t-files) since it's a
+ # reasonable lowest common denominator that works for all our archs.
+-HOST_LIBGCC2_CFLAGS += -mmacosx-version-min=10.4
++HOST_LIBGCC2_CFLAGS += $(DARWIN_MIN_LIB_VERSION)
+
+ crt3.o: $(srcdir)/config/darwin-crt3.c
+- $(crt_compile) -mmacosx-version-min=10.4 -c $<
++ $(crt_compile) $(DARWIN_MIN_CRT_VERSION) -c $<
+
+ crttms.o: $(srcdir)/config/darwin-crt-tm.c
+- $(crt_compile) -mmacosx-version-min=10.4 -DSTART -c $<
++ $(crt_compile) $(DARWIN_MIN_CRT_VERSION) -DSTART -c $<
+
+ crttme.o: $(srcdir)/config/darwin-crt-tm.c
+- $(crt_compile) -mmacosx-version-min=10.4 -DEND -c $<
++ $(crt_compile) $(DARWIN_MIN_CRT_VERSION) -DEND -c $<
+
+ # Make emutls weak so that we can deal with -static-libgcc, override the
+ # hidden visibility when this is present in libgcc_eh.
+@@ -25,6 +25,8 @@ libemutls_w.a: emutls_s.o
+ $(RANLIB_FOR_TARGET) $@
+
+ # Patch to __Unwind_Find_Enclosing_Function for Darwin10.
++# This needs to be built for darwin10, regardless of the current platform
++# version.
+ d10-uwfef.o: $(srcdir)/config/darwin10-unwind-find-enc-func.c libgcc_tm.h
+ $(crt_compile) -mmacosx-version-min=10.6 -c $<
+
+new file mode 100644
+--- /dev/null
++++ b/src/libgcc/config/t-darwin-min-1
+@@ -0,0 +1,3 @@
++# Support building with -mmacosx-version-min back to 10.1.
++DARWIN_MIN_LIB_VERSION = -mmacosx-version-min=10.4
++DARWIN_MIN_CRT_VERSION = -mmacosx-version-min=10.1
+new file mode 100644
+--- /dev/null
++++ b/src/libgcc/config/t-darwin-min-5
+@@ -0,0 +1,3 @@
++# Support building with -mmacosx-version-min back to 10.5.
++DARWIN_MIN_LIB_VERSION = -mmacosx-version-min=10.5
++DARWIN_MIN_CRT_VERSION = -mmacosx-version-min=10.5
+new file mode 100644
+--- /dev/null
++++ b/src/libgcc/config/t-darwin-min-8
+@@ -0,0 +1,3 @@
++# Support building with -mmacosx-version-min back to 10.8.
++DARWIN_MIN_LIB_VERSION = -mmacosx-version-min=10.8
++DARWIN_MIN_CRT_VERSION = -mmacosx-version-min=10.8
+--- a/src/libgo/Makefile.am
++++ b/src/libgo/Makefile.am
+@@ -417,6 +417,7 @@ toolexeclibgounicode_DATA = \
+ # Some internal packages are needed to bootstrap the gc toolchain.
+ toolexeclibgointernaldir = $(toolexeclibgodir)/internal
+ toolexeclibgointernal_DATA = \
++ internal/lazyregexp.gox \
+ internal/reflectlite.gox \
+ internal/unsafeheader.gox
+
+--- a/src/libgo/Makefile.in
++++ b/src/libgo/Makefile.in
+@@ -885,6 +885,7 @@ toolexeclibgounicode_DATA = \
+ # Some internal packages are needed to bootstrap the gc toolchain.
+ toolexeclibgointernaldir = $(toolexeclibgodir)/internal
+ toolexeclibgointernal_DATA = \
++ internal/lazyregexp.gox \
+ internal/reflectlite.gox \
+ internal/unsafeheader.gox
+
+--- a/src/libgo/go/internal/abi/abi.go
++++ b/src/libgo/go/internal/abi/abi.go
+@@ -17,10 +17,7 @@ package abi
+ // compile-time error.
+ //
+ // Implemented as a compile intrinsic.
+-func FuncPCABI0(f any) uintptr {
+- // The compiler should remove all calls.
+- panic("FuncPCABI0")
+-}
++func FuncPCABI0(f any) uintptr
+
+ // FuncPCABIInternal returns the entry PC of the function f. If f is a
+ // direct reference of a function, it must be defined as ABIInternal.
+@@ -29,7 +26,4 @@ func FuncPCABI0(f any) uintptr {
+ // the behavior is undefined.
+ //
+ // Implemented as a compile intrinsic.
+-func FuncPCABIInternal(f any) uintptr {
+- // The compiler should remove all calls.
+- panic("FuncPCABIInternal")
+-}
++func FuncPCABIInternal(f any) uintptr
+--- a/src/libgo/go/syscall/libcall_linux.go
++++ b/src/libgo/go/syscall/libcall_linux.go
+@@ -188,6 +188,14 @@ func Gettid() (tid int) {
+ //sys PivotRoot(newroot string, putold string) (err error)
+ //pivot_root(newroot *byte, putold *byte) _C_int
+
++// Used by golang.org/x/sys/unix.
++//sys prlimit(pid int, resource int, newlimit *Rlimit, oldlimit *Rlimit) (err error)
++//prlimit(pid Pid_t, resource _C_int, newlimit *Rlimit, oldlimit *Rlimit) _C_int
++
++func Prlimit(pid int, resource int, newlimit *Rlimit, oldlimit *Rlimit) error {
++ return prlimit(pid, resource, newlimit, oldlimit)
++}
++
+ //sys Removexattr(path string, attr string) (err error)
+ //removexattr(path *byte, name *byte) _C_int
+
+--- a/src/libgomp/ChangeLog
++++ b/src/libgomp/ChangeLog
+@@ -1,3 +1,69 @@
++2023-09-01 Tobias Burnus <tobias@codesourcery.com>
++
++ Backported from master:
++ 2023-08-19 Tobias Burnus <tobias@codesourcery.com>
++
++ PR middle-end/111017
++ * testsuite/libgomp.c-c++-common/non-rect-loop-1.c: New test.
++
++2023-06-28 Thomas Schwinge <thomas@codesourcery.com>
++
++ Backported from master:
++ 2023-06-02 Thomas Schwinge <thomas@codesourcery.com>
++
++ PR testsuite/66005
++ * testsuite/lib/libgomp.exp: 'flock' through stdout.
++ * testsuite/flock: New.
++ * configure.ac (FLOCK): Point to that if no 'flock' available, but
++ 'perl' is.
++ * configure: Regenerate.
++
++2023-06-28 Thomas Schwinge <thomas@codesourcery.com>
++
++ Backported from master:
++ 2023-05-15 Thomas Schwinge <thomas@codesourcery.com>
++
++ PR testsuite/66005
++ * configure.ac: Look for 'flock'.
++ * testsuite/Makefile.am (gcc_test_parallel_slots): Enable parallel testing.
++ * testsuite/config/default.exp: Don't 'load_lib "standard.exp"' here...
++ * testsuite/lib/libgomp.exp: ... but here, instead.
++ (libgomp_load): Override for parallel testing.
++ * testsuite/libgomp-site-extra.exp.in (FLOCK): Set.
++ * configure: Regenerate.
++ * Makefile.in: Regenerate.
++ * testsuite/Makefile.in: Regenerate.
++
++2023-06-28 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
++
++ Backported from master:
++ 2023-05-15 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
++ Thomas Schwinge <thomas@codesourcery.com>
++
++ PR testsuite/66005
++ * testsuite/Makefile.am (PWD_COMMAND): New variable.
++ (%/site.exp): New target.
++ (check_p_numbers0, check_p_numbers1, check_p_numbers2)
++ (check_p_numbers3, check_p_numbers4, check_p_numbers5)
++ (check_p_numbers6, check_p_numbers, gcc_test_parallel_slots)
++ (check_p_subdirs)
++ (check_DEJAGNU_libgomp_targets): New variables.
++ ($(check_DEJAGNU_libgomp_targets)): New target.
++ ($(check_DEJAGNU_libgomp_targets)): New dependency.
++ (check-DEJAGNU $(check_DEJAGNU_libgomp_targets)): New targets.
++ * testsuite/Makefile.in: Regenerate.
++ * testsuite/lib/libgomp.exp: For parallel testing,
++ 'load_file ../libgomp-test-support.exp'.
++
++2023-06-28 Thomas Schwinge <thomas@codesourcery.com>
++
++ Backported from master:
++ 2023-05-08 Thomas Schwinge <thomas@codesourcery.com>
++
++ * testsuite/libgomp.c++/c++.exp: Use 'lang_include_flags' instead
++ of 'libstdcxx_includes'.
++ * testsuite/libgomp.oacc-c++/c++.exp: Likewise.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libgomp/Makefile.in
++++ b/src/libgomp/Makefile.in
+@@ -384,6 +384,7 @@ EXEEXT = @EXEEXT@
+ FC = @FC@
+ FCFLAGS = @FCFLAGS@
+ FGREP = @FGREP@
++FLOCK = @FLOCK@
+ GREP = @GREP@
+ HSA_RUNTIME_INCLUDE = @HSA_RUNTIME_INCLUDE@
+ HSA_RUNTIME_LIB = @HSA_RUNTIME_LIB@
+--- a/src/libgomp/configure
++++ b/src/libgomp/configure
+@@ -656,6 +656,7 @@ tmake_file
+ XLDFLAGS
+ XCFLAGS
+ config_path
++FLOCK
+ CPU_COUNT
+ LIBGOMP_BUILD_VERSIONED_SHLIB_SUN_FALSE
+ LIBGOMP_BUILD_VERSIONED_SHLIB_SUN_TRUE
+@@ -11431,7 +11432,7 @@ else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+-#line 11434 "configure"
++#line 11435 "configure"
+ #include "confdefs.h"
+
+ #if HAVE_DLFCN_H
+@@ -11537,7 +11538,7 @@ else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+-#line 11540 "configure"
++#line 11541 "configure"
+ #include "confdefs.h"
+
+ #if HAVE_DLFCN_H
+@@ -16663,6 +16664,91 @@ $as_echo "unable to detect (assuming 1)" >&6; }
+ fi
+
+
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for flock implementation" >&5
++$as_echo "$as_me: checking for flock implementation" >&6;}
++for ac_prog in flock
++do
++ # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_FLOCK+:} false; then :
++ $as_echo_n "(cached) " >&6
++else
++ if test -n "$FLOCK"; then
++ ac_cv_prog_FLOCK="$FLOCK" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++ IFS=$as_save_IFS
++ test -z "$as_dir" && as_dir=.
++ for ac_exec_ext in '' $ac_executable_extensions; do
++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++ ac_cv_prog_FLOCK="$ac_prog"
++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
++ break 2
++ fi
++done
++ done
++IFS=$as_save_IFS
++
++fi
++fi
++FLOCK=$ac_cv_prog_FLOCK
++if test -n "$FLOCK"; then
++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FLOCK" >&5
++$as_echo "$FLOCK" >&6; }
++else
++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
++fi
++
++
++ test -n "$FLOCK" && break
++done
++
++# Fallback if 'perl' is available.
++if test -z "$FLOCK"; then
++ # Extract the first word of "perl", so it can be a program name with args.
++set dummy perl; ac_word=$2
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_FLOCK+:} false; then :
++ $as_echo_n "(cached) " >&6
++else
++ if test -n "$FLOCK"; then
++ ac_cv_prog_FLOCK="$FLOCK" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++ IFS=$as_save_IFS
++ test -z "$as_dir" && as_dir=.
++ for ac_exec_ext in '' $ac_executable_extensions; do
++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++ ac_cv_prog_FLOCK="$srcdir/testsuite/flock"
++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
++ break 2
++ fi
++done
++ done
++IFS=$as_save_IFS
++
++fi
++fi
++FLOCK=$ac_cv_prog_FLOCK
++if test -n "$FLOCK"; then
++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FLOCK" >&5
++$as_echo "$FLOCK" >&6; }
++else
++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
++fi
++
++
++fi
++
+ # Get target configury.
+ . ${srcdir}/configure.tgt
+ CFLAGS="$save_CFLAGS $XCFLAGS"
+--- a/src/libgomp/configure.ac
++++ b/src/libgomp/configure.ac
+@@ -339,6 +339,13 @@ fi
+ AX_COUNT_CPUS
+ AC_SUBST(CPU_COUNT)
+
++AC_MSG_NOTICE([checking for flock implementation])
++AC_CHECK_PROGS(FLOCK, flock)
++# Fallback if 'perl' is available.
++if test -z "$FLOCK"; then
++ AC_CHECK_PROG(FLOCK, perl, $srcdir/testsuite/flock)
++fi
++
+ # Get target configury.
+ . ${srcdir}/configure.tgt
+ CFLAGS="$save_CFLAGS $XCFLAGS"
+--- a/src/libgomp/testsuite/Makefile.am
++++ b/src/libgomp/testsuite/Makefile.am
+@@ -12,6 +12,8 @@ _RUNTEST = $(shell if test -f $(top_srcdir)/../dejagnu/runtest; then \
+ echo $(top_srcdir)/../dejagnu/runtest; else echo runtest; fi)
+ RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir
+
++PWD_COMMAND = $${PWDCMD-pwd}
++
+ EXTRA_DEJAGNU_SITE_CONFIG = libgomp-site-extra.exp
+
+ # Instead of directly in ../testsuite/libgomp-test-support.exp.in, the
+@@ -25,17 +27,6 @@ libgomp-test-support.exp: libgomp-test-support.pt.exp Makefile
+ 'set offload_additional_lib_paths "$(offload_additional_lib_paths)"'
+ mv $@.tmp $@
+
+-check-DEJAGNU: site.exp
+- srcdir='$(srcdir)'; export srcdir; \
+- EXPECT=$(EXPECT); export EXPECT; \
+- if $(SHELL) -c "$(_RUNTEST) --version" > /dev/null 2>&1; then \
+- exit_status=0; l='$(PACKAGE)'; for tool in $$l; do \
+- if $(_RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \
+- then :; else exit_status=1; fi; \
+- done; \
+- else echo "WARNING: could not find '$(_RUNTEST)'" 1>&2; :;\
+- fi; \
+- exit $$exit_status
+ site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG)
+ @echo 'Making a new site.exp file ...'
+ @echo '## these variables are automatically generated by make ##' >site.tmp
+@@ -63,6 +54,72 @@ site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG)
+ @test ! -f site.exp || mv site.exp site.bak
+ @mv site.tmp site.exp
+
++%/site.exp: site.exp
++ -@test -d $* || mkdir $*
++ @srcdir=`cd $(srcdir); ${PWD_COMMAND}`;
++ @objdir=`${PWD_COMMAND}`/$*; \
++ sed -e "s|^set srcdir .*$$|set srcdir $$srcdir|" \
++ -e "s|^set objdir .*$$|set objdir $$objdir|" \
++ site.exp > $*/site.exp.tmp
++ @-rm -f $*/site.bak
++ @test ! -f $*/site.exp || mv $*/site.exp $*/site.bak
++ @mv $*/site.exp.tmp $*/site.exp
++
++check_p_numbers0:=1 2 3 4 5 6 7 8 9
++check_p_numbers1:=0 $(check_p_numbers0)
++check_p_numbers2:=$(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers1)))
++check_p_numbers3:=$(addprefix 0,$(check_p_numbers1)) $(check_p_numbers2)
++check_p_numbers4:=$(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers3)))
++check_p_numbers5:=$(addprefix 0,$(check_p_numbers3)) $(check_p_numbers4)
++check_p_numbers6:=$(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers5)))
++check_p_numbers:=$(check_p_numbers0) $(check_p_numbers2) $(check_p_numbers4) $(check_p_numbers6)
++# If unable to serialize execution testing, use just one parallel slot.
++gcc_test_parallel_slots:=$(if $(FLOCK),$(if $(GCC_TEST_PARALLEL_SLOTS),$(GCC_TEST_PARALLEL_SLOTS),19),1)
++check_p_subdirs=$(wordlist 1,$(gcc_test_parallel_slots),$(check_p_numbers))
++check_DEJAGNU_libgomp_targets = $(addprefix check-DEJAGNUlibgomp,$(check_p_subdirs))
++$(check_DEJAGNU_libgomp_targets): check-DEJAGNUlibgomp%: libgomp%/site.exp
++
++check-DEJAGNU $(check_DEJAGNU_libgomp_targets): check-DEJAGNU%: site.exp
++ $(if $*,@)AR="$(AR)"; export AR; \
++ RANLIB="$(RANLIB)"; export RANLIB; \
++ if [ -z "$*" ] && [ -n "$(filter -j%, $(MFLAGS))" ]; then \
++ rm -rf libgomp-parallel || true; \
++ mkdir libgomp-parallel; \
++ $(MAKE) $(AM_MAKEFLAGS) $(check_DEJAGNU_libgomp_targets); \
++ rm -rf libgomp-parallel || true; \
++ for idx in $(check_p_subdirs); do \
++ if [ -d libgomp$$idx ]; then \
++ mv -f libgomp$$idx/libgomp.sum libgomp$$idx/libgomp.sum.sep; \
++ mv -f libgomp$$idx/libgomp.log libgomp$$idx/libgomp.log.sep; \
++ fi; \
++ done; \
++ $(SHELL) $(srcdir)/../../contrib/dg-extract-results.sh \
++ libgomp[0-9]*/libgomp.sum.sep > libgomp.sum; \
++ $(SHELL) $(srcdir)/../../contrib/dg-extract-results.sh -L \
++ libgomp[0-9]*/libgomp.log.sep > libgomp.log; \
++ exit 0; \
++ fi; \
++ srcdir=`$(am__cd) $(srcdir) && pwd`; export srcdir; \
++ EXPECT=$(EXPECT); export EXPECT; \
++ runtest=$(_RUNTEST); \
++ if [ -z "$$runtest" ]; then runtest=runtest; fi; \
++ tool=libgomp; \
++ if [ -n "$*" ]; then \
++ if [ -f libgomp-parallel/finished ]; then rm -rf "$*"; exit 0; fi; \
++ GCC_RUNTEST_PARALLELIZE_DIR=`${PWD_COMMAND}`/libgomp-parallel; \
++ export GCC_RUNTEST_PARALLELIZE_DIR; \
++ cd "$*"; \
++ fi; \
++ if $(SHELL) -c "$$runtest --version" > /dev/null 2>&1; then \
++ $$runtest $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) \
++ $(RUNTESTFLAGS); \
++ if [ -n "$*" ]; then \
++ touch $$GCC_RUNTEST_PARALLELIZE_DIR/finished; \
++ fi; \
++ else \
++ echo "WARNING: could not find \`runtest'" 1>&2; :;\
++ fi
++
+ distclean-DEJAGNU:
+ -rm -f site.exp site.bak
+ -l='$(PACKAGE)'; for tool in $$l; do \
+--- a/src/libgomp/testsuite/Makefile.in
++++ b/src/libgomp/testsuite/Makefile.in
+@@ -162,6 +162,7 @@ EXEEXT = @EXEEXT@
+ FC = @FC@
+ FCFLAGS = @FCFLAGS@
+ FGREP = @FGREP@
++FLOCK = @FLOCK@
+ GREP = @GREP@
+ HSA_RUNTIME_INCLUDE = @HSA_RUNTIME_INCLUDE@
+ HSA_RUNTIME_LIB = @HSA_RUNTIME_LIB@
+@@ -310,7 +311,20 @@ _RUNTEST = $(shell if test -f $(top_srcdir)/../dejagnu/runtest; then \
+ echo $(top_srcdir)/../dejagnu/runtest; else echo runtest; fi)
+
+ RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir
++PWD_COMMAND = $${PWDCMD-pwd}
+ EXTRA_DEJAGNU_SITE_CONFIG = libgomp-site-extra.exp
++check_p_numbers0 := 1 2 3 4 5 6 7 8 9
++check_p_numbers1 := 0 $(check_p_numbers0)
++check_p_numbers2 := $(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers1)))
++check_p_numbers3 := $(addprefix 0,$(check_p_numbers1)) $(check_p_numbers2)
++check_p_numbers4 := $(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers3)))
++check_p_numbers5 := $(addprefix 0,$(check_p_numbers3)) $(check_p_numbers4)
++check_p_numbers6 := $(foreach i,$(check_p_numbers0),$(addprefix $(i),$(check_p_numbers5)))
++check_p_numbers := $(check_p_numbers0) $(check_p_numbers2) $(check_p_numbers4) $(check_p_numbers6)
++# If unable to serialize execution testing, use just one parallel slot.
++gcc_test_parallel_slots := $(if $(FLOCK),$(if $(GCC_TEST_PARALLEL_SLOTS),$(GCC_TEST_PARALLEL_SLOTS),19),1)
++check_p_subdirs = $(wordlist 1,$(gcc_test_parallel_slots),$(check_p_numbers))
++check_DEJAGNU_libgomp_targets = $(addprefix check-DEJAGNUlibgomp,$(check_p_subdirs))
+ all: all-am
+
+ .SUFFIXES:
+@@ -485,17 +499,6 @@ libgomp-test-support.exp: libgomp-test-support.pt.exp Makefile
+ 'set offload_additional_lib_paths "$(offload_additional_lib_paths)"'
+ mv $@.tmp $@
+
+-check-DEJAGNU: site.exp
+- srcdir='$(srcdir)'; export srcdir; \
+- EXPECT=$(EXPECT); export EXPECT; \
+- if $(SHELL) -c "$(_RUNTEST) --version" > /dev/null 2>&1; then \
+- exit_status=0; l='$(PACKAGE)'; for tool in $$l; do \
+- if $(_RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \
+- then :; else exit_status=1; fi; \
+- done; \
+- else echo "WARNING: could not find '$(_RUNTEST)'" 1>&2; :;\
+- fi; \
+- exit $$exit_status
+ site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG)
+ @echo 'Making a new site.exp file ...'
+ @echo '## these variables are automatically generated by make ##' >site.tmp
+@@ -523,6 +526,59 @@ site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG)
+ @test ! -f site.exp || mv site.exp site.bak
+ @mv site.tmp site.exp
+
++%/site.exp: site.exp
++ -@test -d $* || mkdir $*
++ @srcdir=`cd $(srcdir); ${PWD_COMMAND}`;
++ @objdir=`${PWD_COMMAND}`/$*; \
++ sed -e "s|^set srcdir .*$$|set srcdir $$srcdir|" \
++ -e "s|^set objdir .*$$|set objdir $$objdir|" \
++ site.exp > $*/site.exp.tmp
++ @-rm -f $*/site.bak
++ @test ! -f $*/site.exp || mv $*/site.exp $*/site.bak
++ @mv $*/site.exp.tmp $*/site.exp
++$(check_DEJAGNU_libgomp_targets): check-DEJAGNUlibgomp%: libgomp%/site.exp
++
++check-DEJAGNU $(check_DEJAGNU_libgomp_targets): check-DEJAGNU%: site.exp
++ $(if $*,@)AR="$(AR)"; export AR; \
++ RANLIB="$(RANLIB)"; export RANLIB; \
++ if [ -z "$*" ] && [ -n "$(filter -j%, $(MFLAGS))" ]; then \
++ rm -rf libgomp-parallel || true; \
++ mkdir libgomp-parallel; \
++ $(MAKE) $(AM_MAKEFLAGS) $(check_DEJAGNU_libgomp_targets); \
++ rm -rf libgomp-parallel || true; \
++ for idx in $(check_p_subdirs); do \
++ if [ -d libgomp$$idx ]; then \
++ mv -f libgomp$$idx/libgomp.sum libgomp$$idx/libgomp.sum.sep; \
++ mv -f libgomp$$idx/libgomp.log libgomp$$idx/libgomp.log.sep; \
++ fi; \
++ done; \
++ $(SHELL) $(srcdir)/../../contrib/dg-extract-results.sh \
++ libgomp[0-9]*/libgomp.sum.sep > libgomp.sum; \
++ $(SHELL) $(srcdir)/../../contrib/dg-extract-results.sh -L \
++ libgomp[0-9]*/libgomp.log.sep > libgomp.log; \
++ exit 0; \
++ fi; \
++ srcdir=`$(am__cd) $(srcdir) && pwd`; export srcdir; \
++ EXPECT=$(EXPECT); export EXPECT; \
++ runtest=$(_RUNTEST); \
++ if [ -z "$$runtest" ]; then runtest=runtest; fi; \
++ tool=libgomp; \
++ if [ -n "$*" ]; then \
++ if [ -f libgomp-parallel/finished ]; then rm -rf "$*"; exit 0; fi; \
++ GCC_RUNTEST_PARALLELIZE_DIR=`${PWD_COMMAND}`/libgomp-parallel; \
++ export GCC_RUNTEST_PARALLELIZE_DIR; \
++ cd "$*"; \
++ fi; \
++ if $(SHELL) -c "$$runtest --version" > /dev/null 2>&1; then \
++ $$runtest $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) \
++ $(RUNTESTFLAGS); \
++ if [ -n "$*" ]; then \
++ touch $$GCC_RUNTEST_PARALLELIZE_DIR/finished; \
++ fi; \
++ else \
++ echo "WARNING: could not find \`runtest'" 1>&2; :;\
++ fi
++
+ distclean-DEJAGNU:
+ -rm -f site.exp site.bak
+ -l='$(PACKAGE)'; for tool in $$l; do \
+--- a/src/libgomp/testsuite/config/default.exp
++++ b/src/libgomp/testsuite/config/default.exp
+@@ -13,5 +13,3 @@
+ # You should have received a copy of the GNU General Public License
+ # along with this program; see the file COPYING3. If not see
+ # <http://www.gnu.org/licenses/>.
+-
+-load_lib "standard.exp"
+new file mode 100755
+--- /dev/null
++++ b/src/libgomp/testsuite/flock
+@@ -0,0 +1,17 @@
++#!/usr/bin/env perl
++
++use strict;
++use warnings;
++
++# Only arguments '--exclusive 1' exactly are supported.
++(@ARGV == 2) or die;
++my $mode = shift;
++($mode eq "--exclusive") or die;
++my $fd = shift;
++($fd eq "1") or die;
++
++use Fcntl ':flock';
++
++open(my $fh, '>&=', 1) or die "open: $!";
++
++flock($fh, LOCK_EX) or die "flock: $!";
+--- a/src/libgomp/testsuite/lib/libgomp.exp
++++ b/src/libgomp/testsuite/lib/libgomp.exp
+@@ -9,6 +9,7 @@ proc load_gcc_lib { filename } {
+ }
+
+ load_lib dg.exp
++load_lib standard.exp
+
+ # Required to use gcc-dg.exp - however, the latter should NOT be
+ # loaded until ${tool}_target_compile is defined since it uses that
+@@ -40,7 +41,12 @@ load_gcc_lib torture-options.exp
+ load_gcc_lib fortran-modules.exp
+
+ # Try to load a test support file, built during libgomp configuration.
+-load_file libgomp-test-support.exp
++# Search in '..' vs. '.' to support parallel vs. sequential testing.
++if [info exists ::env(GCC_RUNTEST_PARALLELIZE_DIR)] {
++ load_file ../libgomp-test-support.exp
++} else {
++ load_file libgomp-test-support.exp
++}
+
+ set dg-do-what-default run
+
+@@ -319,6 +325,36 @@ proc libgomp_option_proc { option } {
+ }
+ }
+
++if ![info exists ::env(GCC_RUNTEST_PARALLELIZE_DIR)] {
++ # No parallel testing.
++} elseif { $FLOCK == "" } {
++ # Using just one parallel slot.
++} else {
++ # Using several parallel slots. Override DejaGnu
++ # 'standard.exp:${tool}_load'...
++ rename libgomp_load standard_libgomp_load
++ proc libgomp_load { program args } {
++ # ... in order to serialize execution testing via an exclusive lock.
++ # We use stdout, as per <https://perldoc.perl.org/functions/flock>
++ # "[...] FILEHANDLE [...] be open with write intent to use LOCK_EX".
++ set lock_file ../lock
++ set lock_kind --exclusive
++ set lock_fd [open $lock_file a+]
++ set lock_clock_begin [clock seconds]
++ global FLOCK
++ exec $FLOCK $lock_kind 1 >@ $lock_fd
++ set lock_clock_end [clock seconds]
++ verbose -log "Got ${FLOCK}('$lock_file', '$lock_kind') at [clock format $lock_clock_end] after [expr $lock_clock_end - $lock_clock_begin] s" 2
++
++ set result [standard_libgomp_load $program $args]
++
++ # Unlock (implicit with 'close').
++ close $lock_fd
++
++ return $result
++ }
++}
++
+ # Translate offload target to OpenACC device type. Return the empty string if
+ # not supported, and 'host' for offload target 'disable'.
+ proc offload_target_to_openacc_device_type { offload_target } {
+--- a/src/libgomp/testsuite/libgomp-site-extra.exp.in
++++ b/src/libgomp/testsuite/libgomp-site-extra.exp.in
+@@ -1 +1,2 @@
++set FLOCK {@FLOCK@}
+ set GCC_UNDER_TEST {@CC@}
+--- a/src/libgomp/testsuite/libgomp.c++/c++.exp
++++ b/src/libgomp/testsuite/libgomp.c++/c++.exp
+@@ -66,13 +66,12 @@ if { $lang_test_file_found } {
+
+ set flags_file "${blddir}/../libstdc++-v3/scripts/testsuite_flags"
+ if { [file exists $flags_file] } {
+- set libstdcxx_includes [exec sh $flags_file --build-includes]
+- } else {
+- set libstdcxx_includes ""
++ set lang_source_re {^.*\.[cC]$}
++ set lang_include_flags [exec sh $flags_file --build-includes]
+ }
+
+ # Main loop.
+- dg-runtest $tests "" "$libstdcxx_includes $DEFAULT_CFLAGS"
++ dg-runtest $tests "" $DEFAULT_CFLAGS
+ }
+
+ # See above.
+new file mode 100644
+--- /dev/null
++++ b/src/libgomp/testsuite/libgomp.c-c++-common/non-rect-loop-1.c
+@@ -0,0 +1,72 @@
++/* PR middle-end/111017 */
++
++#include <omp.h>
++
++#define DIM 32
++#define N (DIM*DIM)
++
++int
++main ()
++{
++ int a[N], b[N], c[N];
++ int dim = DIM;
++
++ for (int i = 0; i < N; i++)
++ {
++ a[i] = 3*i;
++ b[i] = 7*i;
++ c[i] = 42;
++ }
++
++ #pragma omp parallel for collapse(2)
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ c[j] = a[j] + b[j];
++
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ if (c[j] != a[j] + b[j] || c[j] != 3*j +7*j)
++ __builtin_abort ();
++ for (int i = 0; i < N; i++)
++ c[i] = 42;
++
++ #pragma omp parallel for collapse(2)
++ for (int i = 0; i < dim; i++)
++ for (int j = (i*dim); j < (i*dim + dim); j++)
++ c[j] = a[j] + b[j];
++
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ if (c[j] != a[j] + b[j] || c[j] != 3*j + 7*j)
++ __builtin_abort ();
++ for (int i = 0; i < N; i++)
++ c[i] = 42;
++
++ for (int dev = 0; dev <= omp_get_num_devices(); dev++)
++ {
++ #pragma omp target teams loop device(dev) map(to:a,b) map(from:c)
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ c[j] = a[j] + b[j];
++
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ if (c[j] != a[j] + b[j] || c[j] != 3*j + 7*j)
++ __builtin_abort ();
++ for (int i = 0; i < N; i++)
++ c[i] = 42;
++
++ #pragma omp target teams loop device(dev) map(to:a,b) map(from:c)
++ for (int i = 0; i < dim; i++)
++ for (int j = (i*dim); j < (i*dim + dim); j++)
++ c[j] = a[j] + b[j];
++
++ for (int i = 0; i < DIM; i++)
++ for (int j = (i*DIM); j < (i*DIM + DIM); j++)
++ if (c[j] != a[j] + b[j] || c[j] != 3*j + 7*j)
++ __builtin_abort ();
++ for (int i = 0; i < N; i++)
++ c[i] = 42;
++ }
++ return 0;
++}
+--- a/src/libgomp/testsuite/libgomp.oacc-c++/c++.exp
++++ b/src/libgomp/testsuite/libgomp.oacc-c++/c++.exp
+@@ -72,9 +72,8 @@ if { $lang_test_file_found } {
+
+ set flags_file "${blddir}/../libstdc++-v3/scripts/testsuite_flags"
+ if { [file exists $flags_file] } {
+- set libstdcxx_includes [exec sh $flags_file --build-includes]
+- } else {
+- set libstdcxx_includes ""
++ set lang_source_re {^.*\.[cC]$}
++ set lang_include_flags [exec sh $flags_file --build-includes]
+ }
+
+ # Test with all available offload targets, and with offloading disabled.
+@@ -147,7 +146,7 @@ if { $lang_test_file_found } {
+ }
+ }
+
+- gcc-dg-runtest $tests "$tagopt" "$libstdcxx_includes"
++ gcc-dg-runtest $tests "$tagopt" ""
+ }
+ unset offload_target
+ } else {
+--- a/src/libphobos/ChangeLog
++++ b/src/libphobos/ChangeLog
+@@ -1,3 +1,7 @@
++2023-06-06 Iain Buclaw <ibuclaw@gdcproject.org>
++
++ * src/MERGE: Merge upstream phobos 8e8aaae50.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libphobos/src/MERGE
++++ b/src/libphobos/src/MERGE
+@@ -1,4 +1,4 @@
+-5fef0d28fc873fb5a0dbfb9149759d76a7b9f1b7
++8e8aaae5080ccc2e0a2202cbe9778dca96496a95
+
+ The first line of this file holds the git revision number of the last
+ merge done from the dlang/phobos repository.
+--- a/src/libphobos/src/std/container/array.d
++++ b/src/libphobos/src/std/container/array.d
+@@ -412,9 +412,9 @@ if (!is(immutable T == immutable bool))
+ .destroy(e);
+
+ static if (hasIndirections!T)
+- GC.removeRange(_payload.ptr);
++ GC.removeRange(cast(void*) _payload.ptr);
+
+- free(_payload.ptr);
++ free(cast(void*) _payload.ptr);
+ }
+
+ this(this) @disable;
+@@ -489,14 +489,14 @@ if (!is(immutable T == immutable bool))
+ auto newPayload = newPayloadPtr[0 .. oldLength];
+
+ // copy old data over to new array
+- memcpy(newPayload.ptr, _payload.ptr, T.sizeof * oldLength);
++ memcpy(cast(void*) newPayload.ptr, cast(void*) _payload.ptr, T.sizeof * oldLength);
+ // Zero out unused capacity to prevent gc from seeing false pointers
+- memset(newPayload.ptr + oldLength,
++ memset( cast(void*) (newPayload.ptr + oldLength),
+ 0,
+ (elements - oldLength) * T.sizeof);
+- GC.addRange(newPayload.ptr, sz);
+- GC.removeRange(_payload.ptr);
+- free(_payload.ptr);
++ GC.addRange(cast(void*) newPayload.ptr, sz);
++ GC.removeRange(cast(void*) _payload.ptr);
++ free(cast(void*) _payload.ptr);
+ _payload = newPayload;
+ }
+ else
+@@ -611,12 +611,17 @@ if (!is(immutable T == immutable bool))
+ return opEquals(rhs);
+ }
+
++ // fix https://issues.dlang.org/show_bug.cgi?id=23140
++ private alias Unshared(T) = T;
++ private alias Unshared(T: shared U, U) = U;
++
+ /// ditto
+ bool opEquals(ref const Array rhs) const
+ {
+ if (empty) return rhs.empty;
+ if (rhs.empty) return false;
+- return _data._payload == rhs._data._payload;
++
++ return cast(Unshared!(T)[]) _data._payload == cast(Unshared!(T)[]) rhs._data._payload;
+ }
+
+ /**
+@@ -1740,6 +1745,16 @@ if (!is(immutable T == immutable bool))
+ assertThrown!AssertError(array.length = 5);
+ }
+
++// https://issues.dlang.org/show_bug.cgi?id=23140
++@system unittest
++{
++ shared class C
++ {
++ }
++
++ Array!C ac;
++ ac = Array!C([new C]);
++}
+ ////////////////////////////////////////////////////////////////////////////////
+ // Array!bool
+ ////////////////////////////////////////////////////////////////////////////////
+--- a/src/libphobos/src/std/typecons.d
++++ b/src/libphobos/src/std/typecons.d
+@@ -3793,8 +3793,28 @@ Params:
+ sink.formatValue(_value, fmt);
+ }
+ }
++
++ void toString()(scope void delegate(const(char)[]) sink, scope const ref FormatSpec!char fmt) const
++ {
++ if (isNull)
++ {
++ sink.formatValue("Nullable.null", fmt);
++ }
++ else
++ {
++ sink.formatValue(_value, fmt);
++ }
++ }
+ }
+
++@system unittest
++{
++ import std.conv : to;
++
++ const Nullable!(ulong, 0) x = 1;
++ assert(x.to!string == "1");
++}
++
+ /**
+ Check if `this` is in the null state.
+
+@@ -4320,8 +4340,28 @@ Params:
+ sink.formatValue(*_value, fmt);
+ }
+ }
++
++ void toString()(scope void delegate(const(char)[]) sink, scope const ref FormatSpec!char fmt) const
++ {
++ if (isNull)
++ {
++ sink.formatValue("Nullable.null", fmt);
++ }
++ else
++ {
++ sink.formatValue(*_value, fmt);
++ }
++ }
+ }
+
++@system unittest
++{
++ import std.conv : to;
++
++ const NullableRef!(ulong) x = new ulong(1);
++ assert(x.to!string == "1");
++}
++
+ /**
+ Binds the internal state to `value`.
+
+--- a/src/libsanitizer/ChangeLog
++++ b/src/libsanitizer/ChangeLog
+@@ -1,3 +1,11 @@
++2023-05-21 Iain Sandoe <iain@sandoe.co.uk>
++
++ Backported from master:
++ 2023-04-18 Iain Sandoe <iain@sandoe.co.uk>
++
++ * configure.tgt: Unsupport Darwin22+ until a mechanism can be found
++ to locate dyld in the shared cache.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libsanitizer/configure.tgt
++++ b/src/libsanitizer/configure.tgt
+@@ -64,7 +64,7 @@ case "${target}" in
+ HWASAN_SUPPORTED=yes
+ fi
+ ;;
+- x86_64-*-darwin2* | x86_64-*-darwin1[2-9]* | i?86-*-darwin1[2-9]*)
++ x86_64-*-darwin2[01]* | x86_64-*-darwin1[2-9]* | i?86-*-darwin1[2-9]*)
+ TSAN_SUPPORTED=no
+ EXTRA_CXXFLAGS="${EXTRA_CXXFLAGS} -Wl,-undefined,dynamic_lookup"
+ ;;
+--- a/src/libstdc++-v3/ChangeLog
++++ b/src/libstdc++-v3/ChangeLog
+@@ -1,3 +1,453 @@
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ * include/bits/fs_dir.h (directory_iterator::operator==):
++ Define without using a non-exported shared_ptr symbol.
++ (recursive_directory_iterator::operator==): Likewise.
++
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-09-01 Jonathan Wakely <jwakely@redhat.com>
++
++ * testsuite/27_io/filesystem/path/108636.cc: Add dg-require for
++ filesystem support.
++
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-06-06 Jonathan Wakely <jwakely@redhat.com>
++
++ PR libstdc++/108178
++ * src/filesystem/ops-common.h (do_copy_file): Check for empty
++ files by trying to read a character.
++ * testsuite/27_io/filesystem/operations/copy_file_108178.cc:
++ New test.
++
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-06-06 Jonathan Wakely <jwakely@redhat.com>
++
++ * src/filesystem/ops-common.h (do_copy_file) [O_CLOEXEC]: Set
++ close-on-exec flag on file descriptors.
++
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-03-20 Jonathan Wakely <jwakely@redhat.com>
++
++ * src/filesystem/ops-common.h (get_temp_directory_from_env): Fix
++ formatting.
++
++2023-10-03 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-02-02 Jonathan Wakely <jwakely@redhat.com>
++
++ * src/filesystem/ops-common.h [AVR] (__unsupported): Always use
++ errc::function_not_supported instead of errc::not_supported.
++
++2023-10-02 Tim Song <t.canens.cpp@gmail.com>
++
++ Backported from master:
++ 2023-09-28 Tim Song <t.canens.cpp@gmail.com>
++
++ PR libstdc++/111050
++ * include/bits/hashtable_policy.h
++ (_Hash_node_value_base<>::_M_valptr(), _Hash_node_value_base<>::_M_v())
++ Add [[__gnu__::__always_inline__]].
++
++2023-09-27 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-08-09 Jonathan Wakely <jwakely@redhat.com>
++
++ * include/experimental/bits/fs_path.h (path::string): Use
++ _GLIBCXX17_CONSTEXPR not _GLIBCXX_CONSTEXPR for 'if constexpr'.
++ * include/std/charconv (__to_chars_8): Initialize variable for
++ C++17 constexpr rules.
++
++2023-09-26 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-09-25 Jonathan Wakely <jwakely@redhat.com>
++
++ PR libstdc++/111511
++ PR c++/111512
++ * include/std/array (to_array): Qualify calls to __to_array.
++ * testsuite/23_containers/array/creation/111512.cc: New test.
++
++2023-09-18 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-09-18 Jonathan Wakely <jwakely@redhat.com>
++
++ * doc/xml/manual/configure.xml: Use conventional option name.
++ * doc/xml/manual/status_cxx2020.xml: Update.
++ * doc/html/*: Regenerate.
++
++2023-09-14 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-09-14 Jonathan Wakely <jwakely@redhat.com>
++
++ PR c++/111357
++ * include/bits/utility.h (make_integer_sequence): Add cast.
++ * testsuite/20_util/integer_sequence/pr111357.cc: New test.
++
++2023-07-12 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-07-12 Jonathan Wakely <jwakely@redhat.com>
++
++ PR libstdc++/95048
++ * testsuite/27_io/filesystem/path/construct/95048.cc: Check
++ conversions to wide strings.
++ * testsuite/experimental/filesystem/path/construct/95048.cc:
++ Likewise.
++
++2023-07-06 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-07-06 Jonathan Wakely <jwakely@redhat.com>
++
++ PR libstdc++/104299
++ * doc/xml/manual/configure.xml: Describe stdio_pure argument to
++ --enable-cstdio.
++ * doc/html/manual/configure.html: Regenerate.
++
++2023-06-23 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2023-06-01 Jonathan Wakely <jwakely@redhat.com>
++
++ * doc/xml/manual/evolution.xml: Document removal of implicit
++ allocator rebinding extensions in strict mode and for C++20.
++ * doc/html/*: Regenerate.
++
++2023-06-21 Jason Merrill <jason@redhat.com>
++
++ PR tree-optimization/105651
++ * include/bits/basic_string.tcc (_M_replace): Add an assert
++ to avoid -Wrestrict false positive.
++
++2023-05-30 Alexandre Oliva <oliva@adacore.com>
++
++ Backported from master:
++ 2023-05-30 Alexandre Oliva <oliva@adacore.com>
++
++ * testsuite/20_util/from_chars/4.cc: Skip long double test06
++ on x86_64-vxworks.
++ * testsuite/20_util/to_chars/long_double.cc: Xfail run on
++ x86_64-vxworks.
++
++2023-05-30 Alexandre Oliva <oliva@adacore.com>
++
++ Backported from master:
++ 2023-05-25 Alexandre Oliva <oliva@adacore.com>
++
++ * testsuite/20_util/to_chars/long_double.cc: Expect execution
++ fail on x86-vxworks.
++
++2023-05-30 Alexandre Oliva <oliva@adacore.com>
++
++ Backported from master:
++ 2023-05-05 Alexandre Oliva <oliva@adacore.com>
++
++ * testsuite/20_util/from_chars/4.cc: Skip long double test06
++ on aarch64-vxworks.
++ * testsuite/20_util/to_chars/long_double.cc: Xfail run on
++ aarch64-vxworks.
++
++2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/109822
++ * include/experimental/bits/simd.h (to_native): Use int NTTP
++ as specified in PTS2.
++ (to_compatible): Likewise. Add missing tag to call mask
++ generator ctor.
++ * testsuite/experimental/simd/pr109822_cast_functions.cc: New
++ test.
++
++2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ * testsuite/experimental/simd/tests/integer_operators.cc:
++ Compute expected value differently to avoid getting turned into
++ a vector shift.
++
++2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ * testsuite/experimental/simd/tests/operator_cvt.cc: Make long
++ double <-> (u)long conversion tests conditional on sizeof(long
++ double) and sizeof(long).
++
++2023-05-30 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-26 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd_ppc.h (_S_bit_shift_left):
++ Negative __y is UB, so prefer signed compare.
++
++2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/109949
++ * include/experimental/bits/simd.h (__intrinsic_type): If
++ __ALTIVEC__ is defined, map gnu::vector_size types to their
++ corresponding __vector T types without losing unsignedness of
++ integer types. Also prefer long long over long.
++ * include/experimental/bits/simd_ppc.h (_S_popcount): Cast mask
++ object to the expected unsigned vector type.
++
++2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/109261
++ * include/experimental/bits/simd.h (__intrinsic_type):
++ Specialize __intrinsic_type<double, 8> and
++ __intrinsic_type<double, 16> in any case, but provide the member
++ type only with __aarch64__.
++
++2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-24 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/109261
++ * include/experimental/bits/simd_neon.h (_S_reduce): Add
++ constexpr and make NEON implementation conditional on
++ not __builtin_is_constant_evaluated.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/109261
++ * include/experimental/bits/simd.h (_SimdWrapper::_M_set):
++ Avoid vector builtin subscripting in constant expressions.
++ (resizing_simd_cast): Avoid memcpy if constant_evaluated.
++ (const_where_expression, where_expression, where)
++ (__extract_part, simd_mask, _SimdIntOperators, simd): Add either
++ _GLIBCXX_SIMD_CONSTEXPR (on public APIs), or constexpr (on
++ internal APIs).
++ * include/experimental/bits/simd_builtin.h (__vector_permute)
++ (__vector_shuffle, __extract_part, _GnuTraits::_SimdCastType1)
++ (_GnuTraits::_SimdCastType2, _SimdImplBuiltin)
++ (_MaskImplBuiltin::_S_store): Add constexpr.
++ (_CommonImplBuiltin::_S_store_bool_array)
++ (_SimdImplBuiltin::_S_load, _SimdImplBuiltin::_S_store)
++ (_SimdImplBuiltin::_S_reduce, _MaskImplBuiltin::_S_load): Add
++ constant_evaluated case.
++ * include/experimental/bits/simd_fixed_size.h
++ (_S_masked_load): Reword comment.
++ (__tuple_element_meta, __make_meta, _SimdTuple::_M_apply_r)
++ (_SimdTuple::_M_subscript_read, _SimdTuple::_M_subscript_write)
++ (__make_simd_tuple, __optimize_simd_tuple, __extract_part)
++ (__autocvt_to_simd, _Fixed::__traits::_SimdBase)
++ (_Fixed::__traits::_SimdCastType, _SimdImplFixedSize): Add
++ constexpr.
++ (_SimdTuple::operator[], _M_set): Add constexpr and add
++ constant_evaluated case.
++ (_MaskImplFixedSize::_S_load): Add constant_evaluated case.
++ * include/experimental/bits/simd_scalar.h: Add constexpr.
++ * include/experimental/bits/simd_x86.h (_CommonImplX86): Add
++ constexpr and add constant_evaluated case.
++ (_SimdImplX86::_S_equal_to, _S_not_equal_to, _S_less)
++ (_S_less_equal): Value-initialize to satisfy constexpr
++ evaluation.
++ (_MaskImplX86::_S_load): Add constant_evaluated case.
++ (_MaskImplX86::_S_store): Add constexpr and constant_evaluated
++ case. Value-initialize local variables.
++ (_MaskImplX86::_S_logical_and, _S_logical_or, _S_bit_not)
++ (_S_bit_and, _S_bit_or, _S_bit_xor): Add constant_evaluated
++ case.
++ * testsuite/experimental/simd/pr109261_constexpr_simd.cc: New
++ test.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-05-22 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd_builtin.h (_S_fpclassify): Move
++ __infn into #ifdef'ed block.
++ * testsuite/experimental/simd/tests/fpclassify.cc: Declare
++ constants only when used.
++ * testsuite/experimental/simd/tests/frexp.cc: Likewise.
++ * testsuite/experimental/simd/tests/logarithm.cc: Likewise.
++ * testsuite/experimental/simd/tests/trunc_ceil_floor.cc:
++ Likewise.
++ * testsuite/experimental/simd/tests/ldexp_scalbn_scalbln_modf.cc:
++ Move totest and expect1 into #ifdef'ed block.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-03-28 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd.h (is_simd_flag_type): New.
++ (_IsSimdFlagType): New.
++ (copy_from, copy_to, load ctors): Constrain _Flags using
++ _IsSimdFlagType.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-03-28 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd_x86.h (_SimdImplX86): Use
++ _Base::_S_divides if the optimized _S_divides function is hidden
++ via the preprocessor.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-03-21 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd_detail.h: Don't declare the
++ simd API as constexpr with Clang.
++ * include/experimental/bits/simd_x86.h (__movm): New.
++ (_S_blend_avx512): Resolve FIXME. Implement blend using __movm
++ and ?:.
++ (_SimdImplX86::_S_masked_unary): Clang does not implement the
++ same builtins. Implement the function using __movm, ?:, and -
++ operators on vector_size types instead.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-24 Matthias Kretz <m.kretz@gsi.de>
++
++ * include/experimental/bits/simd.h: Line breaks and indenting
++ fixed to follow the libstdc++ standard.
++ * include/experimental/bits/simd_builtin.h: Likewise.
++ * include/experimental/bits/simd_fixed_size.h: Likewise.
++ * include/experimental/bits/simd_neon.h: Likewise.
++ * include/experimental/bits/simd_ppc.h: Likewise.
++ * include/experimental/bits/simd_scalar.h: Likewise.
++ * include/experimental/bits/simd_x86.h: Likewise.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-24 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/108030
++ * include/experimental/bits/simd_fixed_size.h
++ (_SimdImplFixedSize::_S_broadcast): Replace inline with
++ _GLIBCXX_SIMD_INTRINSIC.
++ (_SimdImplFixedSize::_S_generate): Likewise.
++ (_SimdImplFixedSize::_S_load): Likewise.
++ (_SimdImplFixedSize::_S_masked_load): Likewise.
++ (_SimdImplFixedSize::_S_store): Likewise.
++ (_SimdImplFixedSize::_S_masked_store): Likewise.
++ (_SimdImplFixedSize::_S_min): Likewise.
++ (_SimdImplFixedSize::_S_max): Likewise.
++ (_SimdImplFixedSize::_S_complement): Likewise.
++ (_SimdImplFixedSize::_S_unary_minus): Likewise.
++ (_SimdImplFixedSize::_S_plus): Likewise.
++ (_SimdImplFixedSize::_S_minus): Likewise.
++ (_SimdImplFixedSize::_S_multiplies): Likewise.
++ (_SimdImplFixedSize::_S_divides): Likewise.
++ (_SimdImplFixedSize::_S_modulus): Likewise.
++ (_SimdImplFixedSize::_S_bit_and): Likewise.
++ (_SimdImplFixedSize::_S_bit_or): Likewise.
++ (_SimdImplFixedSize::_S_bit_xor): Likewise.
++ (_SimdImplFixedSize::_S_bit_shift_left): Likewise.
++ (_SimdImplFixedSize::_S_bit_shift_right): Likewise.
++ (_SimdImplFixedSize::_S_remquo): Add inline keyword (to be
++ explicit about not always-inline, yet).
++ (_SimdImplFixedSize::_S_isinf): Likewise.
++ (_SimdImplFixedSize::_S_isfinite): Likewise.
++ (_SimdImplFixedSize::_S_isnan): Likewise.
++ (_SimdImplFixedSize::_S_isnormal): Likewise.
++ (_SimdImplFixedSize::_S_signbit): Likewise.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-24 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/108856
++ * include/experimental/bits/simd_builtin.h
++ (_SimdImplBuiltin::_S_masked_unary): More efficient
++ implementation of masked inc-/decrement for integers and floats
++ without AVX2.
++ * include/experimental/bits/simd_x86.h
++ (_SimdImplX86::_S_masked_unary): New. Use AVX512 masked subtract
++ builtins for masked inc-/decrement.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-23 Matthias Kretz <m.kretz@gsi.de>
++
++ * testsuite/experimental/simd/tests/reductions.cc: Introduce
++ max_distance as the type-dependent max error.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-23 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/108030
++ * include/experimental/bits/simd_detail.h
++ (_GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA): Define as empty for
++ __clang__.
++
++2023-05-23 Matthias Kretz <m.kretz@gsi.de>
++
++ Backported from master:
++ 2023-02-16 Matthias Kretz <m.kretz@gsi.de>
++
++ PR libstdc++/108030
++ * include/experimental/bits/simd_detail.h: Define
++ _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA.
++ * include/experimental/bits/simd.h: Annotate lambdas with
++ _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA.
++ * include/experimental/bits/simd_builtin.h: Ditto.
++ * include/experimental/bits/simd_converter.h: Ditto.
++ * include/experimental/bits/simd_fixed_size.h: Ditto.
++ * include/experimental/bits/simd_math.h: Ditto.
++ * include/experimental/bits/simd_neon.h: Ditto.
++ * include/experimental/bits/simd_x86.h: Ditto.
++
++2023-05-16 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2022-11-28 Jonathan Wakely <jwakely@redhat.com>
++
++ PR libstdc++/107801
++ * src/c++17/memory_resource.cc (chunk::_M_bytes): Change type
++ from uint32_t to bitset::size_type. Adjust static assertion.
++ (__pool_resource::_Pool::replenish): Cast to size_t after
++ multiplication instead of before.
++ (__pool_resource::_M_alloc_pools): Ensure both arguments to
++ std::max have type size_t.
++
++2023-05-11 Jonathan Wakely <jwakely@redhat.com>
++
++ Backported from master:
++ 2022-11-16 Jonathan Wakely <jwakely@redhat.com>
++
++ * python/libstdcxx/v6/printers.py (StdExpAnyPrinter): Make
++ expansion of std::string in manager name more robust.
++
+ 2023-05-08 Release Manager
+
+ * GCC 12.3.0 released.
+--- a/src/libstdc++-v3/doc/html/manual/api.html
++++ b/src/libstdc++-v3/doc/html/manual/api.html
+@@ -370,6 +370,11 @@ Calling a <code class="code">std::bind</code> result as volatile was deprecated
+ For the non-default <code class="option">--enable-symvers=gnu-versioned-namespace</code>
+ configuration, the shared library SONAME has been changed to
+ <code class="filename">libstdc++.so.8</code>.
++</p><p>
++ The extension allowing containers to be instantiated with an allocator
++ that doesn't match the container's value type is no longer allowed in
++ strict (<code class="option">-std=c++NN</code>) modes, only in
++ <code class="option">-std=gnu++NN</code> modes.
+ </p></div><div class="section"><div class="titlepage"><div><div><h3 class="title"><a id="api.rel_91"></a><code class="constant">9</code></h3></div></div></div><p>
+ C++17 header
+ <code class="filename"><memory_resource></code>
+@@ -425,6 +430,10 @@ Calling a <code class="code">std::bind</code> result as volatile was deprecated
+ and
+ <code class="filename"><stop_token></code>
+ added.
++</p><p>
++ The extension allowing containers to be instantiated with an allocator
++ that doesn't match the container's value type is no longer allowed in
++ C++20 mode, even in non-strict <code class="option">-std=gnu++20</code> mode.
+ </p></div><div class="section"><div class="titlepage"><div><div><h3 class="title"><a id="api.rel_111"></a><code class="constant">11</code></h3></div></div></div><p>
+ The <code class="option">--enable-cheaders=c_std</code> configuration
+ was deprecated.
+--- a/src/libstdc++-v3/doc/html/manual/configure.html
++++ b/src/libstdc++-v3/doc/html/manual/configure.html
+@@ -38,9 +38,14 @@
+ </p><pre class="programlisting">
+ --with-gxx-include-dir=/foo/H-x86-gcc-3-c-gxx-inc/include/4.4-20090404</pre></dd><dt><span class="term"><code class="code">--enable-cstdio</code></span></dt><dd><p>This is an abbreviated form of <code class="code">'--enable-cstdio=stdio'</code>
+ (described next).
+- </p></dd><dt><span class="term"><code class="code">--enable-cstdio=OPTION</code></span></dt><dd><p>Select a target-specific I/O package. At the moment, the only
+- choice is to use 'stdio', a generic "C" abstraction.
+- The default is 'stdio'. This option can change the library ABI.
++ </p></dd><dt><span class="term"><code class="code">--enable-cstdio=OPTION</code></span></dt><dd><p>Select a target-specific I/O package. The choices are 'stdio'
++ which is a generic abstraction using POSIX file I/O APIs
++ (<code class="function">read</code>, <code class="function">write</code>,
++ <code class="function">lseek</code>, etc.), and 'stdio_pure' which is similar
++ but only uses standard C file I/O APIs (<code class="function">fread</code>,
++ <code class="function">fwrite</code>, <code class="function">fseek</code>, etc.).
++ The 'stdio_posix' choice is a synonym for 'stdio'.
++ The default is 'stdio'. This option can change the library ABI.
+ </p></dd><dt><span class="term"><code class="code">--enable-clocale</code></span></dt><dd><p>This is an abbreviated form of <code class="code">'--enable-clocale=generic'</code>
+ (described next).
+ </p></dd><dt><span class="term"><code class="code">--enable-clocale=OPTION</code></span></dt><dd><p>Select a target-specific underlying locale package. The
+@@ -203,8 +208,8 @@
+ C++ includes. If enabled (as by default), and the compiler
+ seems capable of passing the simple sanity checks thrown at
+ it, try to build stdc++.h.gch as part of the make process.
+- In addition, this generated file is used later on (by appending <code class="code">
+- --include bits/stdc++.h </code> to CXXFLAGS) when running the
++ In addition, this generated file is used later on (by appending
++ <code class="code">-include bits/stdc++.h</code> to CXXFLAGS) when running the
+ testsuite.
+ </p></dd><dt><span class="term"><code class="code">--enable-extern-template</code>[default]</span></dt><dd><p>Use extern template to pre-instantiate all required
+ specializations for certain types defined in the standard libraries.
+--- a/src/libstdc++-v3/doc/html/manual/status.html
++++ b/src/libstdc++-v3/doc/html/manual/status.html
+@@ -1325,10 +1325,10 @@ or any notes about the implementation.
+ </td><td align="center"> 9.1 </td><td align="left"> <code class="code">__cpp_lib_type_identity >= 201806L</code> (since 9.4, see Note 1) </td></tr><tr><td align="left"> <code class="code">unwrap_ref_decay</code> and <code class="code">unwrap_reference</code> </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0318r1.pdf" target="_top">
+ P0318R1 </a>
+- </td><td align="center"> 9.1 </td><td align="left"> <code class="code">__cpp_lib_unwrap_ref >= 201811L</code> (since 9.4, see Note 1) </td></tr><tr bgcolor="#B0B0B0"><td align="left"> Improving Completeness Requirements for Type Traits </td><td align="left">
++ </td><td align="center"> 9.1 </td><td align="left"> <code class="code">__cpp_lib_unwrap_ref >= 201811L</code> (since 9.4, see Note 1) </td></tr><tr><td align="left"> Improving Completeness Requirements for Type Traits </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1285r0.pdf" target="_top">
+ P1285R0 </a>
+- </td><td align="center"> Partial </td><td align="left"> </td></tr><tr><td align="left"> Missing feature test macros </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> Most misuses are diagnosed, but not all. </td></tr><tr><td align="left"> Missing feature test macros </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1353r0.html" target="_top">
+ P1353R0 </a>
+ </td><td align="center"> 9.1 </td><td align="left"> </td></tr><tr><td align="left"> Making std::underlying_type SFINAE-friendly </td><td align="left">
+@@ -1411,18 +1411,18 @@ or any notes about the implementation.
+ </td><td align="center"> 10.1 </td><td align="left"> </td></tr><tr><td align="left"> Ranges Design Cleanup </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1252r2.pdf" target="_top">
+ P1252R2 </a>
+- </td><td align="center"> 10.1 </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Avoid template bloat for <code class="code">safe_ranges</code> in combination with ‘subrange-y’ view adaptors.</td><td align="left">
++ </td><td align="center"> 10.1 </td><td align="left"> </td></tr><tr><td align="left"> Avoid template bloat for <code class="code">safe_ranges</code> in combination with ‘subrange-y’ view adaptors.</td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1739r4.html" target="_top">
+ P1739R4 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td colspan="4" align="left">
++ </td><td align="center"> 12.1 </td><td align="left"> </td></tr><tr><td colspan="4" align="left">
+ <span class="bold"><strong>Time, dates, calendars, time zones</strong></span>
+- </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Extending chrono to Calendars and Time Zones </td><td align="left">
++ </td></tr><tr bgcolor="#B0B0B0"><td align="left"> Extending chrono to Calendars and Time Zones </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0355r7.html" target="_top">
+ P0355R7 </a>
+- </td><td align="left"> </td><td align="left"> <code class="code">__cpp_lib_chrono >= 201803L</code> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Miscellaneous minor fixes for chrono </td><td align="left">
++ </td><td align="left"> (see Note 2) </td><td align="left"> <code class="code">__cpp_lib_chrono >= 201803L</code> </td></tr><tr><td align="left"> Miscellaneous minor fixes for chrono </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1466r3.html" target="_top">
+ P1466R3 </a>
+- </td><td align="left"> </td><td align="left"> <code class="code">__cpp_lib_chrono >= 201907L</code> </td></tr><tr><td align="left"> <code class="code"><chrono></code> <code class="code">zero()</code>, <code class="code">min()</code>, and <code class="code">max()</code> should be <code class="code">noexcept</code> </td><td align="left">
++ </td><td align="left"> (see Note 2) </td><td align="left"> <code class="code">__cpp_lib_chrono >= 201907L</code> </td></tr><tr><td align="left"> <code class="code"><chrono></code> <code class="code">zero()</code>, <code class="code">min()</code>, and <code class="code">max()</code> should be <code class="code">noexcept</code> </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0972r0.pdf" target="_top">
+ P0972R0 </a>
+ </td><td align="center"> 9.1 </td><td align="left"> </td></tr><tr><td colspan="4" align="left">
+@@ -1467,10 +1467,10 @@ or any notes about the implementation.
+ </td></tr><tr><td align="left"> String Prefix and Suffix Checking </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0457r2.html" target="_top">
+ P0457R2 </a>
+- </td><td align="center"> 9.1 </td><td align="left"> <code class="code">__cpp_lib_starts_ends_with >= 201711L</code> (since 9.4, see Note 1) </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Update The Reference To The Unicode Standard </td><td align="left">
++ </td><td align="center"> 9.1 </td><td align="left"> <code class="code">__cpp_lib_starts_ends_with >= 201711L</code> (since 9.4, see Note 1) </td></tr><tr><td align="left"> Update The Reference To The Unicode Standard </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1025r1.html" target="_top">
+ P1025R1 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td colspan="4" align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td colspan="4" align="left">
+ <span class="bold"><strong>Containers</strong></span>
+ </td></tr><tr><td align="left"> span: bounds-safe views for sequences of objects </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0122r7.pdf" target="_top">
+@@ -1492,10 +1492,10 @@ or any notes about the implementation.
+ </td><td align="center"> 10.1 </td><td align="left"> <code class="code">__cpp_lib_to_array >= 201907L</code> </td></tr><tr><td align="left"> Checking for Existence of an Element in Associative Containers </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0458r2.html" target="_top">
+ P0458R2 </a>
+- </td><td align="center"> 9.1 </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Comparing Unordered Containers </td><td align="left">
++ </td><td align="center"> 9.1 </td><td align="left"> </td></tr><tr><td align="left"> Comparing Unordered Containers </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0809r0.pdf" target="_top">
+ P0809R0 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td align="left"> Heterogeneous lookup for unordered containers </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> Heterogeneous lookup for unordered containers </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0919r3.html" target="_top">
+ P0919R3 </a>
+ </td><td align="center"> 11.1 </td><td align="left"> <code class="code">__cpp_lib_generic_unordered_lookup >= 201811</code> </td></tr><tr><td align="left"> Refinement Proposal for P0919 </td><td align="left">
+@@ -1567,7 +1567,7 @@ or any notes about the implementation.
+ </td><td align="center"> 9.1 </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Thou Shalt Not Specialize std Function Templates! </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0551r3.pdf" target="_top">
+ P0551R3 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td align="left"> Bit-casting object representations </td><td align="left">
++ </td><td align="center"> </td><td align="left">These changes will not be implemented.</td></tr><tr><td align="left"> Bit-casting object representations </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0476r2.html" target="_top">
+ P0476R2 </a>
+ </td><td align="center"> 11.1 </td><td align="left"> <code class="code">__cpp_lib_bit_cast >= 201806L</code> </td></tr><tr><td align="left"> Integral power-of-2 operations </td><td align="left">
+@@ -1588,10 +1588,10 @@ or any notes about the implementation.
+ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> Add <code class="code">shift</code> to <code class="code"><algorithm></code> </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0769r2.pdf" target="_top">
+ P0769R2 </a>
+- </td><td align="center"> 10.1 </td><td align="left"> <code class="code">__cpp_lib_shift >= 201806L</code> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Standard Library Specification in a Concepts and Contracts World </td><td align="left">
++ </td><td align="center"> 10.1 </td><td align="left"> <code class="code">__cpp_lib_shift >= 201806L</code> </td></tr><tr><td align="left"> Standard Library Specification in a Concepts and Contracts World </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0788r3.pdf" target="_top">
+ P0788R3 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td align="left"> <code class="code">explicit(bool)</code> </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> <code class="code">explicit(bool)</code> </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0892r2.html" target="_top">
+ P0892R2 </a>
+ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> Eradicating unnecessarily explicit default constructors from the standard library </td><td align="left">
+@@ -1627,10 +1627,10 @@ or any notes about the implementation.
+ </td><td align="center"> 7.1 </td><td align="left"> </td></tr><tr><td align="left"> Editorial Guidance for merging P0019r8 and P0528r3 </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1123r0.html" target="_top">
+ P1123R0 </a>
+- </td><td align="center"> — </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Cleaning up Clause 20 </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> Cleaning up Clause 20 </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1148r0.pdf" target="_top">
+ P1148R0 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Completing the Rebase of Library Fundamentals, Version 3, Working Draft </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left"> Completing the Rebase of Library Fundamentals, Version 3, Working Draft </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1210r0.html" target="_top">
+ P1210R0 </a>
+ </td><td align="center"> </td><td align="left"> </td></tr><tr><td align="left"> Alternative Wording for P0907R4 Signed Integers are Two's Complement </td><td align="left">
+@@ -1666,13 +1666,13 @@ or any notes about the implementation.
+ </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1463r1.pdf" target="_top">
+ P1463R1 </a>
+- </td><td align="center"> 10.1 </td><td align="left"> </td></tr><tr bgcolor="#C8B0B0"><td align="left">
++ </td><td align="center"> 10.1 </td><td align="left"> </td></tr><tr><td align="left">
+ Mandating the Standard Library:
+ Clause 22 - Iterators library
+ </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1464r1.pdf" target="_top">
+ P1464R1 </a>
+- </td><td align="center"> </td><td align="left"> </td></tr><tr><td align="left"> Make <code class="code">create_directory()</code> Intuitive </td><td align="left">
++ </td><td align="center"> — </td><td align="left"> </td></tr><tr><td align="left"> Make <code class="code">create_directory()</code> Intuitive </td><td align="left">
+ <a class="link" href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1164r1.pdf" target="_top">
+ P1164R1
+ </a>
+@@ -1708,6 +1708,9 @@ or any notes about the implementation.
+ Note 1: This feature is supported in older releases but the
+ <code class="code">__cpp_lib</code> macro is not defined to the right value
+ (or not defined at all) until the version shown in parentheses.
++</p><p>
++Note 2: The C++20 calendar types are supported since 11.1;
++time zones, UTC, formatting and parsing are not supported.
+ </p></div><div class="section"><div class="titlepage"><div><div><h3 class="title"><a id="status.iso.2023"></a>C++ 2023</h3></div></div></div><p>
+ In this implementation the <code class="literal">-std=gnu++23</code> or
+ <code class="literal">-std=c++23</code> flag must be used to enable language
+--- a/src/libstdc++-v3/doc/xml/manual/configure.xml
++++ b/src/libstdc++-v3/doc/xml/manual/configure.xml
+@@ -74,9 +74,14 @@
+ </listitem></varlistentry>
+
+ <varlistentry><term><code>--enable-cstdio=OPTION</code></term>
+- <listitem><para>Select a target-specific I/O package. At the moment, the only
+- choice is to use 'stdio', a generic "C" abstraction.
+- The default is 'stdio'. This option can change the library ABI.
++ <listitem><para>Select a target-specific I/O package. The choices are 'stdio'
++ which is a generic abstraction using POSIX file I/O APIs
++ (<function>read</function>, <function>write</function>,
++ <function>lseek</function>, etc.), and 'stdio_pure' which is similar
++ but only uses standard C file I/O APIs (<function>fread</function>,
++ <function>fwrite</function>, <function>fseek</function>, etc.).
++ The 'stdio_posix' choice is a synonym for 'stdio'.
++ The default is 'stdio'. This option can change the library ABI.
+ </para>
+ </listitem></varlistentry>
+
+@@ -336,8 +341,8 @@
+ C++ includes. If enabled (as by default), and the compiler
+ seems capable of passing the simple sanity checks thrown at
+ it, try to build stdc++.h.gch as part of the make process.
+- In addition, this generated file is used later on (by appending <code>
+- --include bits/stdc++.h </code> to CXXFLAGS) when running the
++ In addition, this generated file is used later on (by appending
++ <code>-include bits/stdc++.h</code> to CXXFLAGS) when running the
+ testsuite.
+ </para>
+ </listitem></varlistentry>
+--- a/src/libstdc++-v3/doc/xml/manual/evolution.xml
++++ b/src/libstdc++-v3/doc/xml/manual/evolution.xml
+@@ -915,6 +915,13 @@ Calling a <code>std::bind</code> result as volatile was deprecated for C++17.
+ <filename>libstdc++.so.8</filename>.
+ </para>
+
++<para>
++ The extension allowing containers to be instantiated with an allocator
++ that doesn't match the container's value type is no longer allowed in
++ strict (<option>-std=c++NN</option>) modes, only in
++ <option>-std=gnu++NN</option> modes.
++</para>
++
+ </section>
+
+ <section xml:id="api.rel_91"><info><title><constant>9</constant></title></info>
+@@ -998,6 +1005,12 @@ Calling a <code>std::bind</code> result as volatile was deprecated for C++17.
+ added.
+ </para>
+
++<para>
++ The extension allowing containers to be instantiated with an allocator
++ that doesn't match the container's value type is no longer allowed in
++ C++20 mode, even in non-strict <option>-std=gnu++20</option> mode.
++</para>
++
+ </section>
+
+ <section xml:id="api.rel_111"><info><title><constant>11</constant></title></info>
+--- a/src/libstdc++-v3/doc/xml/manual/status_cxx2020.xml
++++ b/src/libstdc++-v3/doc/xml/manual/status_cxx2020.xml
+@@ -251,14 +251,13 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#B0B0B0" ?>
+ <entry> Improving Completeness Requirements for Type Traits </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1285r0.pdf">
+ P1285R0 </link>
+ </entry>
+- <entry align="center"> Partial </entry>
+- <entry />
++ <entry align="center"> — </entry>
++ <entry> Most misuses are diagnosed, but not all. </entry>
+ </row>
+
+ <row>
+@@ -542,13 +541,12 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Avoid template bloat for <code>safe_ranges</code> in combination with ‘subrange-y’ view adaptors.</entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1739r4.html">
+ P1739R4 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> 12.1 </entry>
+ <entry />
+ </row>
+
+@@ -560,24 +558,23 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
++ <?dbhtml bgcolor="#B0B0B0" ?>
+ <entry> Extending chrono to Calendars and Time Zones </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0355r7.html">
+ P0355R7 </link>
+ </entry>
+- <entry/>
++ <entry> (see Note 2) </entry>
+ <entry> <code>__cpp_lib_chrono >= 201803L</code> </entry>
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Miscellaneous minor fixes for chrono </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1466r3.html">
+ P1466R3 </link>
+ </entry>
+- <entry/>
++ <entry> (see Note 2) </entry>
+ <entry> <code>__cpp_lib_chrono >= 201907L</code> </entry>
+ </row>
+
+@@ -717,13 +714,12 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Update The Reference To The Unicode Standard </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1025r1.html">
+ P1025R1 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> — </entry>
+ <entry />
+ </row>
+
+@@ -797,13 +793,12 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Comparing Unordered Containers </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0809r0.pdf">
+ P0809R0 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> — </entry>
+ <entry />
+ </row>
+
+@@ -1030,7 +1025,7 @@ or any notes about the implementation.
+ P0551R3 </link>
+ </entry>
+ <entry align="center"> </entry>
+- <entry />
++ <entry>These changes will not be implemented.</entry>
+ </row>
+
+ <row>
+@@ -1104,13 +1099,12 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Standard Library Specification in a Concepts and Contracts World </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0788r3.pdf">
+ P0788R3 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> — </entry>
+ <entry />
+ </row>
+
+@@ -1235,13 +1229,12 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry> Cleaning up Clause 20 </entry>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1148r0.pdf">
+ P1148R0 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> — </entry>
+ <entry />
+ </row>
+
+@@ -1332,7 +1325,6 @@ or any notes about the implementation.
+ </row>
+
+ <row>
+- <?dbhtml bgcolor="#C8B0B0" ?>
+ <entry>
+ Mandating the Standard Library:
+ Clause 22 - Iterators library
+@@ -1341,7 +1333,7 @@ or any notes about the implementation.
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1464r1.pdf">
+ P1464R1 </link>
+ </entry>
+- <entry align="center"> </entry>
++ <entry align="center"> — </entry>
+ <entry />
+ </row>
+
+@@ -1450,4 +1442,9 @@ Note 1: This feature is supported in older releases but the
+ (or not defined at all) until the version shown in parentheses.
+ </para>
+
++<para>
++Note 2: The C++20 calendar types are supported since 11.1;
++time zones, UTC, formatting and parsing are not supported.
++</para>
++
+ </section>
+--- a/src/libstdc++-v3/include/bits/basic_string.tcc
++++ b/src/libstdc++-v3/include/bits/basic_string.tcc
+@@ -529,6 +529,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+ {
+ const size_type __nleft = (__p + __len1) - __s;
+ this->_S_move(__p, __s, __nleft);
++ // Tell the middle-end that the copy can't overlap
++ // (PR105651).
++ if (__len2 < __nleft)
++ __builtin_unreachable();
+ this->_S_copy(__p + __nleft, __p + __len2,
+ __len2 - __nleft);
+ }
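
The basic_string.tcc hunk adds an "impossible" branch ending in __builtin_unreachable() so that GCC's middle end may assume __len2 >= __nleft on the following copy, which avoids the -Wrestrict false positive tracked as PR105651. A stand-alone sketch of that hint pattern, with hypothetical function and parameter names rather than the library internals:

// Hypothetical example of asserting an invariant to the optimizer.
#include <cstddef>
#include <cstring>

void copy_tail(char* p, const char* s, std::size_t len2, std::size_t nleft)
{
  if (len2 < nleft)
    __builtin_unreachable();  // promise to GCC: this branch can never execute
  // Below this point the optimizer may assume len2 >= nleft, so the copy
  // length len2 - nleft cannot wrap around to a huge value.
  std::memcpy(p + nleft, s, len2 - nleft);
}
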
+--- a/src/libstdc++-v3/include/bits/fs_dir.h
++++ b/src/libstdc++-v3/include/bits/fs_dir.h
+@@ -433,7 +433,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 3719. Directory iterators should be usable with default sentinel
+ bool operator==(default_sentinel_t) const noexcept
+- { return !_M_dir; }
++ { return *this == directory_iterator(); }
+ #endif
+
+ #if __cpp_impl_three_way_comparison < 201907L
+@@ -541,7 +541,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 3719. Directory iterators should be usable with default sentinel
+ bool operator==(default_sentinel_t) const noexcept
+- { return !_M_dirs; }
++ { return *this == recursive_directory_iterator(); }
+ #endif
+
+ #if __cpp_impl_three_way_comparison < 201907L
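
The fs_dir.h change defines the default_sentinel_t comparisons from LWG 3719 as equality with a default-constructed iterator, so the inline definitions no longer use a non-exported shared_ptr symbol. A small usage sketch of what these overloads permit, assuming C++20; the directory name is only an example:

// Usage sketch: with LWG 3719, directory iteration can stop at
// std::default_sentinel instead of a separately constructed end iterator.
#include <filesystem>
#include <iostream>
#include <iterator>

int main()
{
  namespace fs = std::filesystem;
  for (auto it = fs::directory_iterator("."); it != std::default_sentinel; ++it)
    std::cout << it->path().string() << '\n';  // != is rewritten via the patched operator==
}
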
+--- a/src/libstdc++-v3/include/bits/hashtable_policy.h
++++ b/src/libstdc++-v3/include/bits/hashtable_policy.h
+@@ -291,18 +291,22 @@ namespace __detail
+
+ __gnu_cxx::__aligned_buffer<_Value> _M_storage;
+
++ [[__gnu__::__always_inline__]]
+ _Value*
+ _M_valptr() noexcept
+ { return _M_storage._M_ptr(); }
+
++ [[__gnu__::__always_inline__]]
+ const _Value*
+ _M_valptr() const noexcept
+ { return _M_storage._M_ptr(); }
+
++ [[__gnu__::__always_inline__]]
+ _Value&
+ _M_v() noexcept
+ { return *_M_valptr(); }
+
++ [[__gnu__::__always_inline__]]
+ const _Value&
+ _M_v() const noexcept
+ { return *_M_valptr(); }
+--- a/src/libstdc++-v3/include/bits/utility.h
++++ b/src/libstdc++-v3/include/bits/utility.h
+@@ -173,7 +173,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+ #if __has_builtin(__make_integer_seq)
+ = __make_integer_seq<integer_sequence, _Tp, _Num>;
+ #else
+- = integer_sequence<_Tp, __integer_pack(_Num)...>;
++ = integer_sequence<_Tp, __integer_pack(_Tp(_Num))...>;
+ #endif
+
+ /// Alias template index_sequence
+--- a/src/libstdc++-v3/include/experimental/bits/fs_path.h
++++ b/src/libstdc++-v3/include/experimental/bits/fs_path.h
+@@ -1049,7 +1049,7 @@ namespace __detail
+ inline std::basic_string<_CharT, _Traits, _Allocator>
+ path::string(const _Allocator& __a) const
+ {
+- if _GLIBCXX_CONSTEXPR (is_same<_CharT, value_type>::value)
++ if _GLIBCXX17_CONSTEXPR (is_same<_CharT, value_type>::value)
+ return { _M_pathname.begin(), _M_pathname.end(), __a };
+
+ using _WString = basic_string<_CharT, _Traits, _Allocator>;
+--- a/src/libstdc++-v3/include/experimental/bits/simd.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd.h
+@@ -180,10 +180,7 @@ struct vector_aligned_tag
+ template <typename _Tp, typename _Up>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
+ _S_apply(_Up* __ptr)
+- {
+- return static_cast<_Up*>(
+- __builtin_assume_aligned(__ptr, _S_alignment<_Tp, _Up>));
+- }
++ { return static_cast<_Up*>(__builtin_assume_aligned(__ptr, _S_alignment<_Tp, _Up>)); }
+ };
+
+ template <size_t _Np> struct overaligned_tag
+@@ -288,13 +285,15 @@ namespace __detail
+ // expression. math_errhandling may expand to an extern symbol, in which case a constexpr value
+ // must be guessed.
+ template <int = math_errhandling>
+- constexpr bool __handle_fpexcept_impl(int)
++ constexpr bool
++ __handle_fpexcept_impl(int)
+ { return math_errhandling & MATH_ERREXCEPT; }
+ #endif
+
+ // Fallback if math_errhandling doesn't work: with fast-math assume floating-point exceptions are
+ // ignored, otherwise implement correct exception behavior.
+- constexpr bool __handle_fpexcept_impl(float)
++ constexpr bool
++ __handle_fpexcept_impl(float)
+ {
+ #if defined __FAST_MATH__
+ return false;
+@@ -609,28 +608,34 @@ template <size_t _Bytes>
+ operator&(_Ip __rhs) const
+ {
+ return __generate_from_n_evaluations<_Np, _Ip>(
+- [&](auto __i) { return __rhs._M_data[__i] & _M_data[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __rhs._M_data[__i] & _M_data[__i];
++ });
+ }
+
+ _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
+ operator|(_Ip __rhs) const
+ {
+ return __generate_from_n_evaluations<_Np, _Ip>(
+- [&](auto __i) { return __rhs._M_data[__i] | _M_data[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __rhs._M_data[__i] | _M_data[__i];
++ });
+ }
+
+ _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
+ operator^(_Ip __rhs) const
+ {
+ return __generate_from_n_evaluations<_Np, _Ip>(
+- [&](auto __i) { return __rhs._M_data[__i] ^ _M_data[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __rhs._M_data[__i] ^ _M_data[__i];
++ });
+ }
+
+ _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
+ operator~() const
+ {
+ return __generate_from_n_evaluations<_Np, _Ip>(
+- [&](auto __i) { return ~_M_data[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return ~_M_data[__i]; });
+ }
+ };
+ return _Ip{};
+@@ -743,8 +748,7 @@ template <typename _Tp, typename _Up>
+ // __invoke_ub{{{
+ template <typename... _Args>
+ [[noreturn]] _GLIBCXX_SIMD_ALWAYS_INLINE void
+- __invoke_ub([[maybe_unused]] const char* __msg,
+- [[maybe_unused]] const _Args&... __args)
++ __invoke_ub([[maybe_unused]] const char* __msg, [[maybe_unused]] const _Args&... __args)
+ {
+ #ifdef _GLIBCXX_DEBUG_UB
+ __builtin_fprintf(stderr, __msg, __args...);
+@@ -789,11 +793,14 @@ class _ExactBool
+ const bool _M_data;
+
+ public:
+- _GLIBCXX_SIMD_INTRINSIC constexpr _ExactBool(bool __b) : _M_data(__b) {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _ExactBool(bool __b) : _M_data(__b) {}
+
+ _ExactBool(int) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr operator bool() const { return _M_data; }
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ operator bool() const
++ { return _M_data; }
+ };
+
+ // }}}
+@@ -1391,7 +1398,7 @@ template <size_t _Np, bool _Sanitized>
+ operator^=(const _BitMask& __b) & noexcept
+ {
+ __execute_n_times<_S_array_size>(
+- [&](auto __i) { _M_bits[__i] ^= __b._M_bits[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] ^= __b._M_bits[__i]; });
+ return *this;
+ }
+
+@@ -1399,7 +1406,7 @@ template <size_t _Np, bool _Sanitized>
+ operator|=(const _BitMask& __b) & noexcept
+ {
+ __execute_n_times<_S_array_size>(
+- [&](auto __i) { _M_bits[__i] |= __b._M_bits[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] |= __b._M_bits[__i]; });
+ return *this;
+ }
+
+@@ -1407,7 +1414,7 @@ template <size_t _Np, bool _Sanitized>
+ operator&=(const _BitMask& __b) & noexcept
+ {
+ __execute_n_times<_S_array_size>(
+- [&](auto __i) { _M_bits[__i] &= __b._M_bits[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] &= __b._M_bits[__i]; });
+ return *this;
+ }
+
+@@ -1482,8 +1489,7 @@ template <typename _Tp>
+
+ // else, use GNU-style builtin vector types
+ template <typename _Tp, size_t _Np>
+- struct __vector_type_n<_Tp, _Np,
+- enable_if_t<__is_vectorizable_v<_Tp> && _Np >= 2>>
++ struct __vector_type_n<_Tp, _Np, enable_if_t<__is_vectorizable_v<_Tp> && _Np >= 2>>
+ {
+ static constexpr size_t _S_Np2 = std::__bit_ceil(_Np * sizeof(_Tp));
+
+@@ -1764,8 +1770,7 @@ template <typename _To, typename _From>
+ // }}}
+ // __to_intrin {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+- typename _R
+- = __intrinsic_type_t<typename _TVT::value_type, _TVT::_S_full_size>>
++ typename _R = __intrinsic_type_t<typename _TVT::value_type, _TVT::_S_full_size>>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __to_intrin(_Tp __x)
+ {
+@@ -1786,9 +1791,7 @@ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+ template <typename _Tp, typename... _Args>
+ _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, sizeof...(_Args)>
+ __make_vector(const _Args&... __args)
+- {
+- return __vector_type_t<_Tp, sizeof...(_Args)>{static_cast<_Tp>(__args)...};
+- }
++ { return __vector_type_t<_Tp, sizeof...(_Args)>{static_cast<_Tp>(__args)...}; }
+
+ // }}}
+ // __vector_broadcast{{{
+@@ -1807,10 +1810,7 @@ template <size_t _Np, typename _Tp>
+ template <typename _Tp, size_t _Np, typename _Gp, size_t... _I>
+ _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
+ __generate_vector_impl(_Gp&& __gen, index_sequence<_I...>)
+- {
+- return __vector_type_t<_Tp, _Np>{
+- static_cast<_Tp>(__gen(_SizeConstant<_I>()))...};
+- }
++ { return __vector_type_t<_Tp, _Np>{static_cast<_Tp>(__gen(_SizeConstant<_I>()))...}; }
+
+ template <typename _V, typename _VVT = _VectorTraits<_V>, typename _Gp>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _V
+@@ -2023,8 +2023,7 @@ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+ // }}}
+ // __concat{{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+- typename _R = __vector_type_t<typename _TVT::value_type,
+- _TVT::_S_full_size * 2>>
++ typename _R = __vector_type_t<typename _TVT::value_type, _TVT::_S_full_size * 2>>
+ constexpr _R
+ __concat(_Tp a_, _Tp b_)
+ {
+@@ -2168,8 +2167,7 @@ template <int _Offset,
+ int _SplitBy,
+ typename _Tp,
+ typename _TVT = _VectorTraits<_Tp>,
+- typename _R = __vector_type_t<typename _TVT::value_type,
+- _TVT::_S_full_size / _SplitBy>>
++ typename _R = __vector_type_t<typename _TVT::value_type, _TVT::_S_full_size / _SplitBy>>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __extract(_Tp __in)
+ {
+@@ -2206,7 +2204,7 @@ template <int _Offset,
+ #endif
+ constexpr int _O = _Offset * __return_width;
+ return __call_with_subscripts<__return_width, _O>(
+- __x, [](auto... __entries) {
++ __x, [](auto... __entries) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return reinterpret_cast<_R>(_Up{__entries...});
+ });
+ }
+@@ -2215,8 +2213,7 @@ template <int _Offset,
+ // }}}
+ // __lo/__hi64[z]{{{
+ template <typename _Tp,
+- typename _R
+- = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
++ typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __lo64(_Tp __x)
+ {
+@@ -2226,8 +2223,7 @@ template <typename _Tp,
+ }
+
+ template <typename _Tp,
+- typename _R
+- = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
++ typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __hi64(_Tp __x)
+ {
+@@ -2238,8 +2234,7 @@ template <typename _Tp,
+ }
+
+ template <typename _Tp,
+- typename _R
+- = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
++ typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __hi64z([[maybe_unused]] _Tp __x)
+ {
+@@ -2350,18 +2345,15 @@ template <>
+ // the following excludes bool via __is_vectorizable
+ #if _GLIBCXX_SIMD_HAVE_SSE
+ template <typename _Tp, size_t _Bytes>
+- struct __intrinsic_type<_Tp, _Bytes,
+- enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 64>>
++ struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 64>>
+ {
+ static_assert(!is_same_v<_Tp, long double>,
+ "no __intrinsic_type support for long double on x86");
+
+- static constexpr size_t _S_VBytes = _Bytes <= 16 ? 16
+- : _Bytes <= 32 ? 32
+- : 64;
++ static constexpr size_t _S_VBytes = _Bytes <= 16 ? 16 : _Bytes <= 32 ? 32 : 64;
+
+ using type [[__gnu__::__vector_size__(_S_VBytes)]]
+- = conditional_t<is_integral_v<_Tp>, long long int, _Tp>;
++ = conditional_t<is_integral_v<_Tp>, long long int, _Tp>;
+ };
+ #endif // _GLIBCXX_SIMD_HAVE_SSE
+
+@@ -2377,15 +2369,21 @@ template <>
+ struct __intrinsic_type<float, 16, void>
+ { using type = float32x4_t; };
+
+-#if _GLIBCXX_SIMD_HAVE_NEON_A64
+ template <>
+ struct __intrinsic_type<double, 8, void>
+- { using type = float64x1_t; };
++ {
++#if _GLIBCXX_SIMD_HAVE_NEON_A64
++ using type = float64x1_t;
++#endif
++ };
+
+ template <>
+ struct __intrinsic_type<double, 16, void>
+- { using type = float64x2_t; };
++ {
++#if _GLIBCXX_SIMD_HAVE_NEON_A64
++ using type = float64x2_t;
+ #endif
++ };
+
+ #define _GLIBCXX_SIMD_ARM_INTRIN(_Bits, _Np) \
+ template <> \
+@@ -2407,16 +2405,19 @@ _GLIBCXX_SIMD_ARM_INTRIN(64, 2);
+ #undef _GLIBCXX_SIMD_ARM_INTRIN
+
+ template <typename _Tp, size_t _Bytes>
+- struct __intrinsic_type<_Tp, _Bytes,
+- enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
++ struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
+ {
+ static constexpr int _SVecBytes = _Bytes <= 8 ? 8 : 16;
++
+ using _Ip = __int_for_sizeof_t<_Tp>;
++
+ using _Up = conditional_t<
+ is_floating_point_v<_Tp>, _Tp,
+ conditional_t<is_unsigned_v<_Tp>, make_unsigned_t<_Ip>, _Ip>>;
++
+ static_assert(!is_same_v<_Tp, _Up> || _SVecBytes != _Bytes,
+ "should use explicit specialization above");
++
+ using type = typename __intrinsic_type<_Up, _SVecBytes>::type;
+ };
+ #endif // _GLIBCXX_SIMD_HAVE_NEON
+@@ -2451,23 +2452,54 @@ _GLIBCXX_SIMD_PPC_INTRIN(unsigned long long);
+ #undef _GLIBCXX_SIMD_PPC_INTRIN
+
+ template <typename _Tp, size_t _Bytes>
+- struct __intrinsic_type<_Tp, _Bytes,
+- enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
++ struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
+ {
+ static constexpr bool _S_is_ldouble = is_same_v<_Tp, long double>;
++
+ // allow _Tp == long double with -mlong-double-64
+ static_assert(!(_S_is_ldouble && sizeof(long double) > sizeof(double)),
+ "no __intrinsic_type support for 128-bit floating point on PowerPC");
++
+ #ifndef __VSX__
+ static_assert(!(is_same_v<_Tp, double>
+ || (_S_is_ldouble && sizeof(long double) == sizeof(double))),
+ "no __intrinsic_type support for 64-bit floating point on PowerPC w/o VSX");
+ #endif
+- using type =
+- typename __intrinsic_type_impl<
+- conditional_t<is_floating_point_v<_Tp>,
+- conditional_t<_S_is_ldouble, double, _Tp>,
+- __int_for_sizeof_t<_Tp>>>::type;
++
++ static constexpr auto __element_type()
++ {
++ if constexpr (is_floating_point_v<_Tp>)
++ {
++ if constexpr (_S_is_ldouble)
++ return double {};
++ else
++ return _Tp {};
++ }
++ else if constexpr (is_signed_v<_Tp>)
++ {
++ if constexpr (sizeof(_Tp) == sizeof(_SChar))
++ return _SChar {};
++ else if constexpr (sizeof(_Tp) == sizeof(short))
++ return short {};
++ else if constexpr (sizeof(_Tp) == sizeof(int))
++ return int {};
++ else if constexpr (sizeof(_Tp) == sizeof(_LLong))
++ return _LLong {};
++ }
++ else
++ {
++ if constexpr (sizeof(_Tp) == sizeof(_UChar))
++ return _UChar {};
++ else if constexpr (sizeof(_Tp) == sizeof(_UShort))
++ return _UShort {};
++ else if constexpr (sizeof(_Tp) == sizeof(_UInt))
++ return _UInt {};
++ else if constexpr (sizeof(_Tp) == sizeof(_ULLong))
++ return _ULLong {};
++ }
++ }
++
++ using type = typename __intrinsic_type_impl<decltype(__element_type())>::type;
+ };
+ #endif // __ALTIVEC__
+
+@@ -2483,22 +2515,29 @@ template <size_t _Width>
+ static constexpr size_t _S_full_size = sizeof(_BuiltinType) * __CHAR_BIT__;
+
+ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _S_full_size>
+- __as_full_vector() const { return _M_data; }
++ __as_full_vector() const
++ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_BuiltinType __k)
+- : _M_data(__k) {};
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper() = default;
++
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper(_BuiltinType __k) : _M_data(__k) {};
+
+- _GLIBCXX_SIMD_INTRINSIC operator const _BuiltinType&() const
++ _GLIBCXX_SIMD_INTRINSIC
++ operator const _BuiltinType&() const
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC operator _BuiltinType&()
++ _GLIBCXX_SIMD_INTRINSIC
++ operator _BuiltinType&()
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC _BuiltinType __intrin() const
++ _GLIBCXX_SIMD_INTRINSIC _BuiltinType
++ __intrin() const
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator[](size_t __i) const
++ _GLIBCXX_SIMD_INTRINSIC constexpr value_type
++ operator[](size_t __i) const
+ { return _M_data & (_BuiltinType(1) << __i); }
+
+ template <size_t __i>
+@@ -2506,7 +2545,8 @@ template <size_t _Width>
+ operator[](_SizeConstant<__i>) const
+ { return _M_data & (_BuiltinType(1) << __i); }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, value_type __x)
++ _GLIBCXX_SIMD_INTRINSIC constexpr void
++ _M_set(size_t __i, value_type __x)
+ {
+ if (__x)
+ _M_data |= (_BuiltinType(1) << __i);
+@@ -2514,11 +2554,12 @@ template <size_t _Width>
+ _M_data &= ~(_BuiltinType(1) << __i);
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr bool _M_is_constprop() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr bool
++ _M_is_constprop() const
+ { return __builtin_constant_p(_M_data); }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr bool
++ _M_is_constprop_none_of() const
+ {
+ if (__builtin_constant_p(_M_data))
+ {
+@@ -2530,7 +2571,8 @@ template <size_t _Width>
+ return false;
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr bool
++ _M_is_constprop_all_of() const
+ {
+ if (__builtin_constant_p(_M_data))
+ {
+@@ -2552,10 +2594,11 @@ template <bool _MustZeroInitPadding, typename _BuiltinType>
+ template <typename _BuiltinType>
+ struct _SimdWrapperBase<false, _BuiltinType> // no padding or no SNaNs
+ {
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() = default;
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
+- : _M_data(__init)
+- {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapperBase() = default;
++
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapperBase(_BuiltinType __init) : _M_data(__init) {}
+
+ _BuiltinType _M_data;
+ };
+@@ -2564,10 +2607,11 @@ template <typename _BuiltinType>
+ struct _SimdWrapperBase<true, _BuiltinType> // with padding that needs to
+ // never become SNaN
+ {
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() : _M_data() {}
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
+- : _M_data(__init)
+- {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapperBase() : _M_data() {}
++
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapperBase(_BuiltinType __init) : _M_data(__init) {}
+
+ _BuiltinType _M_data;
+ };
+@@ -2606,24 +2650,33 @@ template <typename _Tp, size_t _Width>
+ __as_full_vector() const
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(initializer_list<_Tp> __init)
+- : _Base(__generate_from_n_evaluations<_Width, _BuiltinType>(
+- [&](auto __i) { return __init.begin()[__i.value]; })) {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper(initializer_list<_Tp> __init)
++ : _Base(__generate_from_n_evaluations<_Width, _BuiltinType>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __init.begin()[__i.value];
++ })) {}
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(const _SimdWrapper&)
+- = default;
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_SimdWrapper&&) = default;
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper() = default;
++
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper(const _SimdWrapper&) = default;
++
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper(_SimdWrapper&&) = default;
+
+ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
+ operator=(const _SimdWrapper&) = default;
++
+ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
+ operator=(_SimdWrapper&&) = default;
+
+ template <typename _V, typename = enable_if_t<disjunction_v<
+ is_same<_V, __vector_type_t<_Tp, _Width>>,
+ is_same<_V, __intrinsic_type_t<_Tp, _Width>>>>>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_V __x)
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdWrapper(_V __x)
+ // __vector_bitcast can convert e.g. __m128 to __vector(2) float
+ : _Base(__vector_bitcast<_Tp, _Width>(__x)) {}
+
+@@ -2633,33 +2686,46 @@ template <typename _Tp, size_t _Width>
+ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator _SimdTuple<_Tp, _As...>() const
+ {
+- const auto& dd = _M_data; // workaround for GCC7 ICE
+- return __generate_from_n_evaluations<sizeof...(_As),
+- _SimdTuple<_Tp, _As...>>([&](
+- auto __i) constexpr { return dd[int(__i)]; });
++ return __generate_from_n_evaluations<sizeof...(_As), _SimdTuple<_Tp, _As...>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return _M_data[int(__i)]; });
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr operator const _BuiltinType&() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ operator const _BuiltinType&() const
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr operator _BuiltinType&()
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ operator _BuiltinType&()
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](size_t __i) const
++ _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
++ operator[](size_t __i) const
+ { return _M_data[__i]; }
+
+ template <size_t __i>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](_SizeConstant<__i>) const
++ _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
++ operator[](_SizeConstant<__i>) const
+ { return _M_data[__i]; }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, _Tp __x)
+- { _M_data[__i] = __x; }
++ _GLIBCXX_SIMD_INTRINSIC constexpr void
++ _M_set(size_t __i, _Tp __x)
++ {
++ if (__builtin_is_constant_evaluated())
++ _M_data = __generate_from_n_evaluations<_Width, _BuiltinType>([&](auto __j) {
++ return __j == __i ? __x : _M_data[__j()];
++ });
++ else
++ _M_data[__i] = __x;
++ }
+
+ _GLIBCXX_SIMD_INTRINSIC
+- constexpr bool _M_is_constprop() const
++ constexpr bool
++ _M_is_constprop() const
+ { return __builtin_constant_p(_M_data); }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr bool
++ _M_is_constprop_none_of() const
+ {
+ if (__builtin_constant_p(_M_data))
+ {
+@@ -2680,7 +2746,8 @@ template <typename _Tp, size_t _Width>
+ return false;
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr bool
++ _M_is_constprop_all_of() const
+ {
+ if (__builtin_constant_p(_M_data))
+ {
+@@ -2812,6 +2879,32 @@ template <typename _Tp>
+ } // namespace simd_abi
+
+ // traits {{{1
++template <typename _Tp>
++ struct is_simd_flag_type
++ : false_type
++ {};
++
++template <>
++ struct is_simd_flag_type<element_aligned_tag>
++ : true_type
++ {};
++
++template <>
++ struct is_simd_flag_type<vector_aligned_tag>
++ : true_type
++ {};
++
++template <size_t _Np>
++ struct is_simd_flag_type<overaligned_tag<_Np>>
++ : __bool_constant<(_Np > 0) and __has_single_bit(_Np)>
++ {};
++
++template <typename _Tp>
++ inline constexpr bool is_simd_flag_type_v = is_simd_flag_type<_Tp>::value;
++
++template <typename _Tp, typename = enable_if_t<is_simd_flag_type_v<_Tp>>>
++ using _IsSimdFlagType = _Tp;
++
+ // is_abi_tag {{{2
+ template <typename _Tp, typename = void_t<>>
+ struct is_abi_tag : false_type {};
+@@ -2878,22 +2971,14 @@ template <typename _Tp, typename _V, typename = void>
+ struct rebind_simd;
+
+ template <typename _Tp, typename _Up, typename _Abi>
+- struct rebind_simd<
+- _Tp, simd<_Up, _Abi>,
+- void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
+- {
+- using type
+- = simd<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
+- };
++ struct rebind_simd<_Tp, simd<_Up, _Abi>,
++ void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
++ { using type = simd<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>; };
+
+ template <typename _Tp, typename _Up, typename _Abi>
+- struct rebind_simd<
+- _Tp, simd_mask<_Up, _Abi>,
+- void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
+- {
+- using type
+- = simd_mask<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
+- };
++ struct rebind_simd<_Tp, simd_mask<_Up, _Abi>,
++ void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
++ { using type = simd_mask<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>; };
+
+ template <typename _Tp, typename _V>
+ using rebind_simd_t = typename rebind_simd<_Tp, _V>::type;
+@@ -2903,13 +2988,11 @@ template <int _Np, typename _V, typename = void>
+ struct resize_simd;
+
+ template <int _Np, typename _Tp, typename _Abi>
+- struct resize_simd<_Np, simd<_Tp, _Abi>,
+- void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
++ struct resize_simd<_Np, simd<_Tp, _Abi>, void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
+ { using type = simd<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
+
+ template <int _Np, typename _Tp, typename _Abi>
+- struct resize_simd<_Np, simd_mask<_Tp, _Abi>,
+- void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
++ struct resize_simd<_Np, simd_mask<_Tp, _Abi>, void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
+ { using type = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
+
+ template <int _Np, typename _V>
+@@ -2958,13 +3041,11 @@ template <typename _Tp, size_t _Np>
+
+ // casts [simd.casts] {{{1
+ // static_simd_cast {{{2
+-template <typename _Tp, typename _Up, typename _Ap, bool = is_simd_v<_Tp>,
+- typename = void>
++template <typename _Tp, typename _Up, typename _Ap, bool = is_simd_v<_Tp>, typename = void>
+ struct __static_simd_cast_return_type;
+
+ template <typename _Tp, typename _A0, typename _Up, typename _Ap>
+- struct __static_simd_cast_return_type<simd_mask<_Tp, _A0>, _Up, _Ap, false,
+- void>
++ struct __static_simd_cast_return_type<simd_mask<_Tp, _A0>, _Up, _Ap, false, void>
+ : __static_simd_cast_return_type<simd<_Tp, _A0>, _Up, _Ap> {};
+
+ template <typename _Tp, typename _Up, typename _Ap>
+@@ -3147,6 +3228,10 @@ template <typename _Tp, typename _Up, typename _Ap>
+ {
+ if constexpr (is_same_v<typename _Tp::abi_type, _Ap>)
+ return __x;
++ else if (__builtin_is_constant_evaluated())
++ return _Tp([&](auto __i) constexpr {
++ return __i < simd_size_v<_Up, _Ap> ? __x[__i] : _Up();
++ });
+ else if constexpr (simd_size_v<_Up, _Ap> == 1)
+ {
+ _Tp __r{};
+@@ -3193,21 +3278,19 @@ template <typename _Tp, int _Np>
+ { return __x; }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC auto
++ _GLIBCXX_SIMD_INTRINSIC fixed_size_simd<_Tp, simd_size_v<_Tp, _Ap>>
+ to_fixed_size(const simd<_Tp, _Ap>& __x)
+ {
+- return simd<_Tp, simd_abi::fixed_size<simd_size_v<_Tp, _Ap>>>([&__x](
+- auto __i) constexpr { return __x[__i]; });
++ using _Rp = fixed_size_simd<_Tp, simd_size_v<_Tp, _Ap>>;
++ return _Rp([&__x](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
+ }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC auto
++ _GLIBCXX_SIMD_INTRINSIC fixed_size_simd_mask<_Tp, simd_size_v<_Tp, _Ap>>
+ to_fixed_size(const simd_mask<_Tp, _Ap>& __x)
+ {
+- constexpr int _Np = simd_mask<_Tp, _Ap>::size();
+- fixed_size_simd_mask<_Tp, _Np> __r;
+- __execute_n_times<_Np>([&](auto __i) constexpr { __r[__i] = __x[__i]; });
+- return __r;
++ return {__private_init,
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; }};
+ }
+
+ // to_native {{{2
+@@ -3221,16 +3304,18 @@ template <typename _Tp, int _Np>
+ return {__mem, vector_aligned};
+ }
+
+-template <typename _Tp, size_t _Np>
++template <typename _Tp, int _Np>
+ _GLIBCXX_SIMD_INTRINSIC
+ enable_if_t<(_Np == native_simd_mask<_Tp>::size()), native_simd_mask<_Tp>>
+ to_native(const fixed_size_simd_mask<_Tp, _Np>& __x)
+ {
+- return native_simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; });
++ return native_simd_mask<_Tp>(
++ __private_init,
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
+ }
+
+ // to_compatible {{{2
+-template <typename _Tp, size_t _Np>
++template <typename _Tp, int _Np>
+ _GLIBCXX_SIMD_INTRINSIC enable_if_t<(_Np == simd<_Tp>::size()), simd<_Tp>>
+ to_compatible(const simd<_Tp, simd_abi::fixed_size<_Np>>& __x)
+ {
+@@ -3239,11 +3324,15 @@ template <typename _Tp, size_t _Np>
+ return {__mem, vector_aligned};
+ }
+
+-template <typename _Tp, size_t _Np>
++template <typename _Tp, int _Np>
+ _GLIBCXX_SIMD_INTRINSIC
+ enable_if_t<(_Np == simd_mask<_Tp>::size()), simd_mask<_Tp>>
+ to_compatible(const simd_mask<_Tp, simd_abi::fixed_size<_Np>>& __x)
+- { return simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; }); }
++ {
++ return simd_mask<_Tp>(
++ __private_init,
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
++ }
+
+ // masked assignment [simd_mask.where] {{{1
+
+@@ -3276,12 +3365,14 @@ template <typename _M, typename _Tp>
+
+ public:
+ const_where_expression(const const_where_expression&) = delete;
++
+ const_where_expression& operator=(const const_where_expression&) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC const_where_expression(const _M& __kk, const _Tp& dd)
+- : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ const_where_expression(const _M& __kk, const _Tp& dd)
++ : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
+
+- _GLIBCXX_SIMD_INTRINSIC _V
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
+ operator-() const&&
+ {
+ return {__private_init,
+@@ -3290,8 +3381,8 @@ template <typename _M, typename _Tp>
+ }
+
+ template <typename _Up, typename _Flags>
+- [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
+- copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
++ [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
++ copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
+ {
+ return {__private_init,
+ _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
+@@ -3299,8 +3390,8 @@ template <typename _M, typename _Tp>
+ }
+
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_INTRINSIC void
+- copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ copy_to(_LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
+ {
+ _Impl::_S_masked_store(__data(_M_value),
+ _Flags::template _S_apply<_V>(__mem),
+@@ -3320,8 +3411,8 @@ template <typename _Tp>
+ struct _Wrapper { using value_type = _V; };
+
+ protected:
+- using value_type =
+- typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
++ using value_type
++ = typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
+
+ _GLIBCXX_SIMD_INTRINSIC friend const _M&
+ __get_mask(const const_where_expression& __x)
+@@ -3338,20 +3429,22 @@ template <typename _Tp>
+ const_where_expression(const const_where_expression&) = delete;
+ const_where_expression& operator=(const const_where_expression&) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC const_where_expression(const bool __kk, const _Tp& dd)
+- : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ const_where_expression(const bool __kk, const _Tp& dd)
++ : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
+
+- _GLIBCXX_SIMD_INTRINSIC _V operator-() const&&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
++ operator-() const&&
+ { return _M_k ? -_M_value : _M_value; }
+
+ template <typename _Up, typename _Flags>
+- [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
+- copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
++ [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
++ copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
+ { return _M_k ? static_cast<_V>(__mem[0]) : _M_value; }
+
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_INTRINSIC void
+- copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ copy_to(_LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
+ {
+ if (_M_k)
+ __mem[0] = _M_value;
+@@ -3376,18 +3469,21 @@ template <typename _M, typename _Tp>
+ is_same<typename _M::abi_type, typename _Tp::abi_type>::value, "");
+ static_assert(_M::size() == _Tp::size(), "");
+
+- _GLIBCXX_SIMD_INTRINSIC friend _Tp& __get_lvalue(where_expression& __x)
++ _GLIBCXX_SIMD_INTRINSIC friend constexpr _Tp&
++ __get_lvalue(where_expression& __x)
+ { return __x._M_value; }
+
+ public:
+ where_expression(const where_expression&) = delete;
+ where_expression& operator=(const where_expression&) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& dd)
+- : const_where_expression<_M, _Tp>(__kk, dd) {}
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ where_expression(const _M& __kk, _Tp& dd)
++ : const_where_expression<_M, _Tp>(__kk, dd) {}
+
+ template <typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC void operator=(_Up&& __x) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator=(_Up&& __x) &&
+ {
+ _Impl::_S_masked_assign(__data(_M_k), __data(_M_value),
+ __to_value_type_or_member_type<_Tp>(
+@@ -3396,14 +3492,15 @@ template <typename _M, typename _Tp>
+
+ #define _GLIBCXX_SIMD_OP_(__op, __name) \
+ template <typename _Up> \
+- _GLIBCXX_SIMD_INTRINSIC void operator __op##=(_Up&& __x)&& \
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void \
++ operator __op##=(_Up&& __x)&& \
+ { \
+ _Impl::template _S_masked_cassign( \
+ __data(_M_k), __data(_M_value), \
+ __to_value_type_or_member_type<_Tp>(static_cast<_Up&&>(__x)), \
+- [](auto __impl, auto __lhs, auto __rhs) constexpr { \
+- return __impl.__name(__lhs, __rhs); \
+- }); \
++ [](auto __impl, auto __lhs, auto __rhs) \
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { return __impl.__name(__lhs, __rhs); }); \
+ } \
+ static_assert(true)
+ _GLIBCXX_SIMD_OP_(+, _S_plus);
+@@ -3418,48 +3515,48 @@ template <typename _M, typename _Tp>
+ _GLIBCXX_SIMD_OP_(>>, _S_shift_right);
+ #undef _GLIBCXX_SIMD_OP_
+
+- _GLIBCXX_SIMD_INTRINSIC void operator++() &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator++() &&
+ {
+ __data(_M_value)
+- = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
+- __data(_M_value));
++ = _Impl::template _S_masked_unary<__increment>(__data(_M_k), __data(_M_value));
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator++(int) &&
+ {
+ __data(_M_value)
+- = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
+- __data(_M_value));
++ = _Impl::template _S_masked_unary<__increment>(__data(_M_k), __data(_M_value));
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator--() &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator--() &&
+ {
+ __data(_M_value)
+- = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
+- __data(_M_value));
++ = _Impl::template _S_masked_unary<__decrement>(__data(_M_k), __data(_M_value));
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator--(int) &&
+ {
+ __data(_M_value)
+- = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
+- __data(_M_value));
++ = _Impl::template _S_masked_unary<__decrement>(__data(_M_k), __data(_M_value));
+ }
+
+ // intentionally hides const_where_expression::copy_from
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_INTRINSIC void
+- copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) &&
+ {
+- __data(_M_value)
+- = _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
+- _Flags::template _S_apply<_Tp>(__mem));
++ __data(_M_value) = _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
++ _Flags::template _S_apply<_Tp>(__mem));
+ }
+ };
+
+ // where_expression<bool, T> {{{2
+ template <typename _Tp>
+- class where_expression<bool, _Tp> : public const_where_expression<bool, _Tp>
++ class where_expression<bool, _Tp>
++ : public const_where_expression<bool, _Tp>
+ {
+ using _M = bool;
+ using typename const_where_expression<_M, _Tp>::value_type;
+@@ -3470,12 +3567,14 @@ template <typename _Tp>
+ where_expression(const where_expression&) = delete;
+ where_expression& operator=(const where_expression&) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& dd)
+- : const_where_expression<_M, _Tp>(__kk, dd) {}
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ where_expression(const _M& __kk, _Tp& dd)
++ : const_where_expression<_M, _Tp>(__kk, dd) {}
+
+ #define _GLIBCXX_SIMD_OP_(__op) \
+ template <typename _Up> \
+- _GLIBCXX_SIMD_INTRINSIC void operator __op(_Up&& __x)&& \
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void \
++ operator __op(_Up&& __x)&& \
+ { if (_M_k) _M_value __op static_cast<_Up&&>(__x); }
+
+ _GLIBCXX_SIMD_OP_(=)
+@@ -3491,67 +3590,71 @@ template <typename _Tp>
+ _GLIBCXX_SIMD_OP_(>>=)
+ #undef _GLIBCXX_SIMD_OP_
+
+- _GLIBCXX_SIMD_INTRINSIC void operator++() &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator++() &&
+ { if (_M_k) ++_M_value; }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator++(int) &&
+ { if (_M_k) ++_M_value; }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator--() &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator--() &&
+ { if (_M_k) --_M_value; }
+
+- _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ operator--(int) &&
+ { if (_M_k) --_M_value; }
+
+ // intentionally hides const_where_expression::copy_from
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_INTRINSIC void
+- copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
++ copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) &&
+ { if (_M_k) _M_value = __mem[0]; }
+ };
+
+ // where {{{1
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
+ where(const typename simd<_Tp, _Ap>::mask_type& __k, simd<_Tp, _Ap>& __value)
+ { return {__k, __value}; }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC
+- const_where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
+- where(const typename simd<_Tp, _Ap>::mask_type& __k,
+- const simd<_Tp, _Ap>& __value)
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ const_where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
++ where(const typename simd<_Tp, _Ap>::mask_type& __k, const simd<_Tp, _Ap>& __value)
+ { return {__k, __value}; }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC
+- where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
+- where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
+- simd_mask<_Tp, _Ap>& __value)
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
++ where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k, simd_mask<_Tp, _Ap>& __value)
+ { return {__k, __value}; }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC
+- const_where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
+- where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
+- const simd_mask<_Tp, _Ap>& __value)
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
++ const_where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
++ where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k, const simd_mask<_Tp, _Ap>& __value)
+ { return {__k, __value}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC where_expression<bool, _Tp>
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR where_expression<bool, _Tp>
+ where(_ExactBool __k, _Tp& __value)
+ { return {__k, __value}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC const_where_expression<bool, _Tp>
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR const_where_expression<bool, _Tp>
+ where(_ExactBool __k, const _Tp& __value)
+ { return {__k, __value}; }
+
+- template <typename _Tp, typename _Ap>
+- void where(bool __k, simd<_Tp, _Ap>& __value) = delete;
++template <typename _Tp, typename _Ap>
++ _GLIBCXX_SIMD_CONSTEXPR void
++ where(bool __k, simd<_Tp, _Ap>& __value) = delete;
+
+- template <typename _Tp, typename _Ap>
+- void where(bool __k, const simd<_Tp, _Ap>& __value) = delete;
++template <typename _Tp, typename _Ap>
++ _GLIBCXX_SIMD_CONSTEXPR void
++ where(bool __k, const simd<_Tp, _Ap>& __value) = delete;
+
+ // proposed mask iterations {{{1
+ namespace __proposed {
+@@ -3568,10 +3671,12 @@ template <size_t _Np>
+ size_t __mask;
+ size_t __bit;
+
+- _GLIBCXX_SIMD_INTRINSIC void __next_bit()
++ _GLIBCXX_SIMD_INTRINSIC void
++ __next_bit()
+ { __bit = __builtin_ctzl(__mask); }
+
+- _GLIBCXX_SIMD_INTRINSIC void __reset_lsb()
++ _GLIBCXX_SIMD_INTRINSIC void
++ __reset_lsb()
+ {
+ // 01100100 - 1 = 01100011
+ __mask &= (__mask - 1);
+@@ -3583,20 +3688,24 @@ template <size_t _Np>
+ iterator(const iterator&) = default;
+ iterator(iterator&&) = default;
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator->() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE size_t
++ operator->() const
+ { return __bit; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator*() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE size_t
++ operator*() const
+ { return __bit; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE iterator& operator++()
++ _GLIBCXX_SIMD_ALWAYS_INLINE iterator&
++ operator++()
+ {
+ __reset_lsb();
+ __next_bit();
+ return *this;
+ }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE iterator operator++(int)
++ _GLIBCXX_SIMD_ALWAYS_INLINE iterator
++ operator++(int)
+ {
+ iterator __tmp = *this;
+ __reset_lsb();
+@@ -3604,17 +3713,21 @@ template <size_t _Np>
+ return __tmp;
+ }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE bool operator==(const iterator& __rhs) const
++ _GLIBCXX_SIMD_ALWAYS_INLINE bool
++ operator==(const iterator& __rhs) const
+ { return __mask == __rhs.__mask; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE bool operator!=(const iterator& __rhs) const
++ _GLIBCXX_SIMD_ALWAYS_INLINE bool
++ operator!=(const iterator& __rhs) const
+ { return __mask != __rhs.__mask; }
+ };
+
+- iterator begin() const
++ iterator
++ begin() const
+ { return __bits.to_ullong(); }
+
+- iterator end() const
++ iterator
++ end() const
+ { return 0; }
+ };
+
+@@ -3629,15 +3742,13 @@ template <typename _Tp, typename _Ap>
+ // reductions [simd.reductions] {{{1
+ template <typename _Tp, typename _Abi, typename _BinaryOperation = plus<>>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
+- reduce(const simd<_Tp, _Abi>& __v,
+- _BinaryOperation __binary_op = _BinaryOperation())
++ reduce(const simd<_Tp, _Abi>& __v, _BinaryOperation __binary_op = _BinaryOperation())
+ { return _Abi::_SimdImpl::_S_reduce(__v, __binary_op); }
+
+ template <typename _M, typename _V, typename _BinaryOperation = plus<>>
+ _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
+ reduce(const const_where_expression<_M, _V>& __x,
+- typename _V::value_type __identity_element,
+- _BinaryOperation __binary_op)
++ typename _V::value_type __identity_element, _BinaryOperation __binary_op)
+ {
+ if (__builtin_expect(none_of(__get_mask(__x)), false))
+ return __identity_element;
+@@ -3676,16 +3787,12 @@ template <typename _M, typename _V>
+ template <typename _Tp, typename _Abi>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
+ hmin(const simd<_Tp, _Abi>& __v) noexcept
+- {
+- return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Minimum());
+- }
++ { return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Minimum()); }
+
+ template <typename _Tp, typename _Abi>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
+ hmax(const simd<_Tp, _Abi>& __v) noexcept
+- {
+- return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Maximum());
+- }
++ { return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Maximum()); }
+
+ template <typename _M, typename _V>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
+@@ -3753,8 +3860,7 @@ template <typename _Tp, typename _Ap>
+
+ template <typename _Tp, typename _Ap>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
+- clamp(const simd<_Tp, _Ap>& __v, const simd<_Tp, _Ap>& __lo,
+- const simd<_Tp, _Ap>& __hi)
++ clamp(const simd<_Tp, _Ap>& __v, const simd<_Tp, _Ap>& __lo, const simd<_Tp, _Ap>& __hi)
+ {
+ using _Impl = typename _Ap::_SimdImpl;
+ return {__private_init,
+@@ -3771,13 +3877,12 @@ template <size_t... _Sizes, typename _Tp, typename _Ap,
+
+ // __extract_part {{{
+ template <int _Index, int _Total, int _Combine = 1, typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr
+ _SimdWrapper<_Tp, _Np / _Total * _Combine>
+ __extract_part(const _SimdWrapper<_Tp, _Np> __x);
+
+-template <int _Index, int _Parts, int _Combine = 1, typename _Tp, typename _A0,
+- typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC auto
++template <int _Index, int _Parts, int _Combine = 1, typename _Tp, typename _A0, typename... _As>
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto
+ __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x);
+
+ // }}}
+@@ -3786,7 +3891,8 @@ template <size_t _V0, size_t... _Values>
+ struct _SizeList
+ {
+ template <size_t _I>
+- static constexpr size_t _S_at(_SizeConstant<_I> = {})
++ static constexpr size_t
++ _S_at(_SizeConstant<_I> = {})
+ {
+ if constexpr (_I == 0)
+ return _V0;
+@@ -3795,7 +3901,8 @@ template <size_t _V0, size_t... _Values>
+ }
+
+ template <size_t _I>
+- static constexpr auto _S_before(_SizeConstant<_I> = {})
++ static constexpr auto
++ _S_before(_SizeConstant<_I> = {})
+ {
+ if constexpr (_I == 0)
+ return _SizeConstant<0>();
+@@ -3805,7 +3912,8 @@ template <size_t _V0, size_t... _Values>
+ }
+
+ template <size_t _Np>
+- static constexpr auto _S_pop_front(_SizeConstant<_Np> = {})
++ static constexpr auto
++ _S_pop_front(_SizeConstant<_Np> = {})
+ {
+ if constexpr (_Np == 0)
+ return _SizeList();
+@@ -3900,12 +4008,11 @@ template <typename _V, typename _Ap,
+ }
+ else if (__x._M_is_constprop())
+ {
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- return _V([&](auto __j) constexpr {
+- return __x[__i * _V::size() + __j];
+- });
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return __x[__i * _V::size() + __j]; });
++ });
+ }
+ else if constexpr (
+ __is_fixed_size_abi_v<_Ap>
+@@ -3918,49 +4025,47 @@ template <typename _V, typename _Ap,
+ #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
+ const __may_alias<_Tp>* const __element_ptr
+ = reinterpret_cast<const __may_alias<_Tp>*>(&__data(__x));
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- return _V(__element_ptr + __i * _V::size(), vector_aligned);
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return _V(__element_ptr + __i * _V::size(), vector_aligned); });
+ #else
+ const auto& __xx = __data(__x);
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- [[maybe_unused]] constexpr size_t __offset
+- = decltype(__i)::value * _V::size();
+- return _V([&](auto __j) constexpr {
+- constexpr _SizeConstant<__j + __offset> __k;
+- return __xx[__k];
+- });
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ [[maybe_unused]] constexpr size_t __offset
++ = decltype(__i)::value * _V::size();
++ return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr _SizeConstant<__j + __offset> __k;
++ return __xx[__k];
++ });
++ });
+ #endif
+ }
+ else if constexpr (is_same_v<typename _V::abi_type, simd_abi::scalar>)
+ {
+ // normally memcpy should work here as well
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr { return __x[__i]; });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
+ }
+ else
+ {
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- if constexpr (__is_fixed_size_abi_v<typename _V::abi_type>)
+- return _V([&](auto __j) constexpr {
+- return __x[__i * _V::size() + __j];
+- });
+- else
+- return _V(__private_init,
+- __extract_part<decltype(__i)::value, _Parts>(__data(__x)));
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (__is_fixed_size_abi_v<typename _V::abi_type>)
++ return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __x[__i * _V::size() + __j];
++ });
++ else
++ return _V(__private_init,
++ __extract_part<decltype(__i)::value, _Parts>(__data(__x)));
++ });
+ }
+ }
+
+ // }}}
+ // split<simd_mask>(simd_mask) {{{
+ template <typename _V, typename _Ap,
+- size_t _Parts
+- = simd_size_v<typename _V::simd_type::value_type, _Ap> / _V::size()>
++ size_t _Parts = simd_size_v<typename _V::simd_type::value_type, _Ap> / _V::size()>
+ enable_if_t<is_simd_mask_v<_V> && simd_size_v<typename
+ _V::simd_type::value_type, _Ap> == _Parts * _V::size(), array<_V, _Parts>>
+ split(const simd_mask<typename _V::simd_type::value_type, _Ap>& __x)
+@@ -3976,22 +4081,22 @@ template <typename _V, typename _Ap,
+ else if constexpr (_V::size() <= __CHAR_BIT__ * sizeof(_ULLong))
+ {
+ const bitset __bits = __x.__to_bitset();
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- constexpr size_t __offset = __i * _V::size();
+- return _V(__bitset_init, (__bits >> __offset).to_ullong());
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr size_t __offset = __i * _V::size();
++ return _V(__bitset_init, (__bits >> __offset).to_ullong());
++ });
+ }
+ else
+ {
+- return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
+- auto __i) constexpr {
+- constexpr size_t __offset = __i * _V::size();
+- return _V(
+- __private_init, [&](auto __j) constexpr {
+- return __x[__j + __offset];
+- });
+- });
++ return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr size_t __offset = __i * _V::size();
++ return _V(__private_init,
++ [&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __x[__j + __offset];
++ });
++ });
+ }
+ }
+
+@@ -4009,12 +4114,14 @@ template <size_t... _Sizes, typename _Tp, typename _Ap, typename>
+ using _V = __deduced_simd<_Tp, _N0>;
+
+ if (__x._M_is_constprop())
+- return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
+- auto __i) constexpr {
+- using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
+- constexpr size_t __offset = _SL::_S_before(__i);
+- return _Vi([&](auto __j) constexpr { return __x[__offset + __j]; });
+- });
++ return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
++ constexpr size_t __offset = _SL::_S_before(__i);
++ return _Vi([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __x[__offset + __j];
++ });
++ });
+ else if constexpr (_Np == _N0)
+ {
+ static_assert(sizeof...(_Sizes) == 1);
+@@ -4081,28 +4188,28 @@ template <size_t... _Sizes, typename _Tp, typename _Ap, typename>
+ #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
+ const __may_alias<_Tp>* const __element_ptr
+ = reinterpret_cast<const __may_alias<_Tp>*>(&__x);
+- return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
+- auto __i) constexpr {
+- using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
+- constexpr size_t __offset = _SL::_S_before(__i);
+- constexpr size_t __base_align = alignof(simd<_Tp, _Ap>);
+- constexpr size_t __a
+- = __base_align - ((__offset * sizeof(_Tp)) % __base_align);
+- constexpr size_t __b = ((__a - 1) & __a) ^ __a;
+- constexpr size_t __alignment = __b == 0 ? __a : __b;
+- return _Vi(__element_ptr + __offset, overaligned<__alignment>);
+- });
++ return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
++ constexpr size_t __offset = _SL::_S_before(__i);
++ constexpr size_t __base_align = alignof(simd<_Tp, _Ap>);
++ constexpr size_t __a
++ = __base_align - ((__offset * sizeof(_Tp)) % __base_align);
++ constexpr size_t __b = ((__a - 1) & __a) ^ __a;
++ constexpr size_t __alignment = __b == 0 ? __a : __b;
++ return _Vi(__element_ptr + __offset, overaligned<__alignment>);
++ });
+ #else
+- return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
+- auto __i) constexpr {
+- using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
+- const auto& __xx = __data(__x);
+- using _Offset = decltype(_SL::_S_before(__i));
+- return _Vi([&](auto __j) constexpr {
+- constexpr _SizeConstant<_Offset::value + __j> __k;
+- return __xx[__k];
+- });
+- });
++ return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
++ const auto& __xx = __data(__x);
++ using _Offset = decltype(_SL::_S_before(__i));
++ return _Vi([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr _SizeConstant<_Offset::value + __j> __k;
++ return __xx[__k];
++ });
++ });
+ #endif
+ }
+
+@@ -4123,8 +4230,7 @@ template <size_t _I, typename _Tp, typename _Ap, typename... _As>
+ // __store_pack_of_simd {{{
+ template <typename _Tp, typename _A0, typename... _As>
+ _GLIBCXX_SIMD_INTRINSIC void
+- __store_pack_of_simd(char* __mem, const simd<_Tp, _A0>& __x0,
+- const simd<_Tp, _As>&... __xs)
++ __store_pack_of_simd(char* __mem, const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
+ {
+ constexpr size_t __n_bytes = sizeof(_Tp) * simd_size_v<_Tp, _A0>;
+ __builtin_memcpy(__mem, &__data(__x0), __n_bytes);
+@@ -4144,8 +4250,9 @@ template <typename _Tp, typename... _As, typename = __detail::__odr_helper>
+ return simd_cast<_Rp>(__xs...);
+ else if ((... && __xs._M_is_constprop()))
+ return simd<_Tp,
+- simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>([&](
+- auto __i) constexpr { return __subscript_in_pack<__i>(__xs...); });
++ simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return __subscript_in_pack<__i>(__xs...); });
+ else
+ {
+ _Rp __r{};
+@@ -4161,9 +4268,10 @@ template <typename _Tp, typename _Abi, size_t _Np>
+ _GLIBCXX_SIMD_CONSTEXPR __deduced_simd<_Tp, simd_size_v<_Tp, _Abi> * _Np>
+ concat(const array<simd<_Tp, _Abi>, _Np>& __x)
+ {
+- return __call_with_subscripts<_Np>(__x, [](const auto&... __xs) {
+- return concat(__xs...);
+- });
++ return __call_with_subscripts<_Np>(
++ __x, [](const auto&... __xs) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return concat(__xs...);
++ });
+ }
+
+ // }}}
+@@ -4178,7 +4286,8 @@ template <typename _Up, typename _Accessor = _Up,
+ int _M_index;
+ _Up& _M_obj;
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr _ValueType _M_read() const noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr _ValueType
++ _M_read() const noexcept
+ {
+ if constexpr (is_arithmetic_v<_Up>)
+ return _M_obj;
+@@ -4187,7 +4296,8 @@ template <typename _Up, typename _Accessor = _Up,
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr void _M_write(_Tp&& __x) const
++ _GLIBCXX_SIMD_INTRINSIC constexpr void
++ _M_write(_Tp&& __x) const
+ { _Accessor::_S_set(_M_obj, _M_index, static_cast<_Tp&&>(__x)); }
+
+ public:
+@@ -4197,32 +4307,32 @@ template <typename _Up, typename _Accessor = _Up,
+
+ using value_type = _ValueType;
+
+- _GLIBCXX_SIMD_INTRINSIC _SmartReference(const _SmartReference&) = delete;
++ _GLIBCXX_SIMD_INTRINSIC
++ _SmartReference(const _SmartReference&) = delete;
+
+- _GLIBCXX_SIMD_INTRINSIC constexpr operator value_type() const noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ operator value_type() const noexcept
+ { return _M_read(); }
+
+- template <typename _Tp,
+- typename
+- = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, value_type>>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator=(_Tp&& __x) &&
++ template <typename _Tp, typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, value_type>>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
++ operator=(_Tp&& __x) &&
+ {
+ _M_write(static_cast<_Tp&&>(__x));
+ return {_M_obj, _M_index};
+ }
+
+-#define _GLIBCXX_SIMD_OP_(__op) \
+- template <typename _Tp, \
+- typename _TT \
+- = decltype(declval<value_type>() __op declval<_Tp>()), \
+- typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, _TT>, \
+- typename = _ValuePreservingOrInt<_TT, value_type>> \
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference \
+- operator __op##=(_Tp&& __x) && \
+- { \
+- const value_type& __lhs = _M_read(); \
+- _M_write(__lhs __op __x); \
+- return {_M_obj, _M_index}; \
++#define _GLIBCXX_SIMD_OP_(__op) \
++ template <typename _Tp, \
++ typename _TT = decltype(declval<value_type>() __op declval<_Tp>()), \
++ typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, _TT>, \
++ typename = _ValuePreservingOrInt<_TT, value_type>> \
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference \
++ operator __op##=(_Tp&& __x) && \
++ { \
++ const value_type& __lhs = _M_read(); \
++ _M_write(__lhs __op __x); \
++ return {_M_obj, _M_index}; \
+ }
+ _GLIBCXX_SIMD_ALL_ARITHMETICS(_GLIBCXX_SIMD_OP_);
+ _GLIBCXX_SIMD_ALL_SHIFTS(_GLIBCXX_SIMD_OP_);
+@@ -4230,9 +4340,9 @@ template <typename _Up, typename _Accessor = _Up,
+ #undef _GLIBCXX_SIMD_OP_
+
+ template <typename _Tp = void,
+- typename
+- = decltype(++declval<conditional_t<true, value_type, _Tp>&>())>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator++() &&
++ typename = decltype(++declval<conditional_t<true, value_type, _Tp>&>())>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
++ operator++() &&
+ {
+ value_type __x = _M_read();
+ _M_write(++__x);
+@@ -4240,9 +4350,9 @@ template <typename _Up, typename _Accessor = _Up,
+ }
+
+ template <typename _Tp = void,
+- typename
+- = decltype(declval<conditional_t<true, value_type, _Tp>&>()++)>
+- _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator++(int) &&
++ typename = decltype(declval<conditional_t<true, value_type, _Tp>&>()++)>
++ _GLIBCXX_SIMD_INTRINSIC constexpr value_type
++ operator++(int) &&
+ {
+ const value_type __r = _M_read();
+ value_type __x = __r;
+@@ -4251,9 +4361,9 @@ template <typename _Up, typename _Accessor = _Up,
+ }
+
+ template <typename _Tp = void,
+- typename
+- = decltype(--declval<conditional_t<true, value_type, _Tp>&>())>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator--() &&
++ typename = decltype(--declval<conditional_t<true, value_type, _Tp>&>())>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
++ operator--() &&
+ {
+ value_type __x = _M_read();
+ _M_write(--__x);
+@@ -4261,9 +4371,9 @@ template <typename _Up, typename _Accessor = _Up,
+ }
+
+ template <typename _Tp = void,
+- typename
+- = decltype(declval<conditional_t<true, value_type, _Tp>&>()--)>
+- _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator--(int) &&
++ typename = decltype(declval<conditional_t<true, value_type, _Tp>&>()--)>
++ _GLIBCXX_SIMD_INTRINSIC constexpr value_type
++ operator--(int) &&
+ {
+ const value_type __r = _M_read();
+ value_type __x = __r;
+@@ -4339,7 +4449,8 @@ template <int _Bytes>
+ template <template <int> class _Abi, int _Bytes, typename _Tp>
+ struct __find_next_valid_abi
+ {
+- static constexpr auto _S_choose()
++ static constexpr auto
++ _S_choose()
+ {
+ constexpr int _NextBytes = std::__bit_ceil(_Bytes) / 2;
+ using _NextAbi = _Abi<_NextBytes>;
+@@ -4383,7 +4494,8 @@ template <template <int> class _A0, template <int> class... _Rest>
+ typename _AbiList<_Rest...>::template _FirstValidAbi<_Tp, _Np>>;
+
+ template <typename _Tp, int _Np>
+- static constexpr auto _S_determine_best_abi()
++ static constexpr auto
++ _S_determine_best_abi()
+ {
+ static_assert(_Np >= 1);
+ constexpr int _Bytes = sizeof(_Tp) * _Np;
+@@ -4496,7 +4608,7 @@ template <typename _Tp, typename _Abi>
+
+ // }}}
+ // access to internal representation (optional feature) {{{
+- _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR explicit
+ simd_mask(typename _Traits::_MaskCastType __init)
+ : _M_data{__init} {}
+ // conversions to internal type is done in _MaskBase
+@@ -4507,11 +4619,11 @@ template <typename _Tp, typename _Abi>
+ // Conversion of simd_mask to and from bitset makes it much easier to
+ // interface with other facilities. I suggest adding `static
+ // simd_mask::from_bitset` and `simd_mask::to_bitset`.
+- _GLIBCXX_SIMD_ALWAYS_INLINE static simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR static simd_mask
+ __from_bitset(bitset<size()> bs)
+ { return {__bitset_init, bs}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE bitset<size()>
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bitset<size()>
+ __to_bitset() const
+ { return _Impl::_S_to_bits(_M_data)._M_to_bitset(); }
+
+@@ -4536,7 +4648,7 @@ template <typename _Tp, typename _Abi>
+ template <typename _Up, typename = enable_if_t<conjunction<
+ is_same<abi_type, simd_abi::fixed_size<size()>>,
+ is_same<_Up, _Up>>::value>>
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
+ simd_mask(const simd_mask<_Up, simd_abi::fixed_size<size()>>& __x)
+ : _M_data(_Impl::_S_from_bitmask(__data(__x), _S_type_tag)) {}
+ #endif
+@@ -4544,41 +4656,36 @@ template <typename _Tp, typename _Abi>
+ // }}}
+ // load constructor {{{
+ template <typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- simd_mask(const value_type* __mem, _Flags)
+- : _M_data(_Impl::template _S_load<_Ip>(
+- _Flags::template _S_apply<simd_mask>(__mem))) {}
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
++ simd_mask(const value_type* __mem, _IsSimdFlagType<_Flags>)
++ : _M_data(_Impl::template _S_load<_Ip>(_Flags::template _S_apply<simd_mask>(__mem))) {}
+
+ template <typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- simd_mask(const value_type* __mem, simd_mask __k, _Flags)
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
++ simd_mask(const value_type* __mem, simd_mask __k, _IsSimdFlagType<_Flags>)
+ : _M_data{}
+ {
+- _M_data
+- = _Impl::_S_masked_load(_M_data, __k._M_data,
+- _Flags::template _S_apply<simd_mask>(__mem));
++ _M_data = _Impl::_S_masked_load(_M_data, __k._M_data,
++ _Flags::template _S_apply<simd_mask>(__mem));
+ }
+
+ // }}}
+ // loads [simd_mask.load] {{{
+ template <typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE void
+- copy_from(const value_type* __mem, _Flags)
+- {
+- _M_data = _Impl::template _S_load<_Ip>(
+- _Flags::template _S_apply<simd_mask>(__mem));
+- }
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
++ copy_from(const value_type* __mem, _IsSimdFlagType<_Flags>)
++ { _M_data = _Impl::template _S_load<_Ip>(_Flags::template _S_apply<simd_mask>(__mem)); }
+
+ // }}}
+ // stores [simd_mask.store] {{{
+ template <typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE void
+- copy_to(value_type* __mem, _Flags) const
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
++ copy_to(value_type* __mem, _IsSimdFlagType<_Flags>) const
+ { _Impl::_S_store(_M_data, _Flags::template _S_apply<simd_mask>(__mem)); }
+
+ // }}}
+ // scalar access {{{
+- _GLIBCXX_SIMD_ALWAYS_INLINE reference
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR reference
+ operator[](size_t __i)
+ {
+ if (__i >= size())
+@@ -4586,7 +4693,7 @@ template <typename _Tp, typename _Abi>
+ return {_M_data, int(__i)};
+ }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE value_type
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR value_type
+ operator[](size_t __i) const
+ {
+ if (__i >= size())
+@@ -4599,7 +4706,7 @@ template <typename _Tp, typename _Abi>
+
+ // }}}
+ // negation {{{
+- _GLIBCXX_SIMD_ALWAYS_INLINE simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd_mask
+ operator!() const
+ { return {__private_init, _Impl::_S_bit_not(_M_data)}; }
+
+@@ -4608,9 +4715,8 @@ template <typename _Tp, typename _Abi>
+ #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
+ // simd_mask<int> && simd_mask<uint> needs disambiguation
+ template <typename _Up, typename _A2,
+- typename
+- = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ typename = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator&&(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
+ {
+ return {__private_init,
+@@ -4618,9 +4724,8 @@ template <typename _Tp, typename _Abi>
+ }
+
+ template <typename _Up, typename _A2,
+- typename
+- = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ typename = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator||(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
+ {
+ return {__private_init,
+@@ -4628,45 +4733,41 @@ template <typename _Tp, typename _Abi>
+ }
+ #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator&&(const simd_mask& __x, const simd_mask& __y)
+- {
+- return {__private_init, _Impl::_S_logical_and(__x._M_data, __y._M_data)};
+- }
++ { return {__private_init, _Impl::_S_logical_and(__x._M_data, __y._M_data)}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator||(const simd_mask& __x, const simd_mask& __y)
+- {
+- return {__private_init, _Impl::_S_logical_or(__x._M_data, __y._M_data)};
+- }
++ { return {__private_init, _Impl::_S_logical_or(__x._M_data, __y._M_data)}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator&(const simd_mask& __x, const simd_mask& __y)
+ { return {__private_init, _Impl::_S_bit_and(__x._M_data, __y._M_data)}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator|(const simd_mask& __x, const simd_mask& __y)
+ { return {__private_init, _Impl::_S_bit_or(__x._M_data, __y._M_data)}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
+ operator^(const simd_mask& __x, const simd_mask& __y)
+ { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
+ operator&=(simd_mask& __x, const simd_mask& __y)
+ {
+ __x._M_data = _Impl::_S_bit_and(__x._M_data, __y._M_data);
+ return __x;
+ }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
+ operator|=(simd_mask& __x, const simd_mask& __y)
+ {
+ __x._M_data = _Impl::_S_bit_or(__x._M_data, __y._M_data);
+ return __x;
+ }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
+ operator^=(simd_mask& __x, const simd_mask& __y)
+ {
+ __x._M_data = _Impl::_S_bit_xor(__x._M_data, __y._M_data);
+@@ -4696,16 +4797,16 @@ template <typename _Tp, typename _Abi>
+ simd_mask(_PrivateInit, _Fp&& __gen)
+ : _M_data()
+ {
+- __execute_n_times<size()>([&](auto __i) constexpr {
++ __execute_n_times<size()>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ _Impl::_S_set(_M_data, __i, __gen(__i));
+ });
+ }
+
+ // }}}
+ // bitset_init ctor {{{
+- _GLIBCXX_SIMD_INTRINSIC simd_mask(_BitsetInit, bitset<size()> __init)
+- : _M_data(
+- _Impl::_S_from_bitmask(_SanitizedBitMask<size()>(__init), _S_type_tag))
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ simd_mask(_BitsetInit, bitset<size()> __init)
++ : _M_data(_Impl::_S_from_bitmask(_SanitizedBitMask<size()>(__init), _S_type_tag))
+ {}
+
+ // }}}
+@@ -4717,8 +4818,7 @@ template <typename _Tp, typename _Abi>
+ struct _CvtProxy
+ {
+ template <typename _Up, typename _A2,
+- typename
+- = enable_if_t<simd_size_v<_Up, _A2> == simd_size_v<_Tp, _Abi>>>
++ typename = enable_if_t<simd_size_v<_Up, _A2> == simd_size_v<_Tp, _Abi>>>
+ _GLIBCXX_SIMD_ALWAYS_INLINE
+ operator simd_mask<_Up, _A2>() &&
+ {
+@@ -4882,7 +4982,9 @@ template <typename _Tp, typename _Abi>
+ if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
+ {
+ const int __r = __call_with_subscripts<simd_size_v<_Tp, _Abi>>(
+- __k, [](auto... __elements) { return ((__elements != 0) + ...); });
++ __k, [](auto... __elements) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return ((__elements != 0) + ...);
++ });
+ if (__builtin_is_constant_evaluated() || __builtin_constant_p(__r))
+ return __r;
+ }
+@@ -4897,8 +4999,11 @@ template <typename _Tp, typename _Abi>
+ {
+ constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
+ const size_t _Idx = __call_with_n_evaluations<_Np>(
+- [](auto... __indexes) { return std::min({__indexes...}); },
+- [&](auto __i) { return __k[__i] ? +__i : _Np; });
++ [](auto... __indexes) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::min({__indexes...});
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __k[__i] ? +__i : _Np;
++ });
+ if (_Idx >= _Np)
+ __invoke_ub("find_first_set(empty mask) is UB");
+ if (__builtin_constant_p(_Idx))
+@@ -4915,8 +5020,11 @@ template <typename _Tp, typename _Abi>
+ {
+ constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
+ const int _Idx = __call_with_n_evaluations<_Np>(
+- [](auto... __indexes) { return std::max({__indexes...}); },
+- [&](auto __i) { return __k[__i] ? int(__i) : -1; });
++ [](auto... __indexes) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::max({__indexes...});
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __k[__i] ? int(__i) : -1;
++ });
+ if (_Idx < 0)
+ __invoke_ub("find_first_set(empty mask) is UB");
+ if (__builtin_constant_p(_Idx))
+@@ -4965,7 +5073,8 @@ template <typename _V, typename _Tp, typename _Abi>
+ {
+ using _Impl = typename _SimdTraits<_Tp, _Abi>::_SimdImpl;
+
+- _GLIBCXX_SIMD_INTRINSIC const _V& __derived() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr const _V&
++ __derived() const
+ { return *static_cast<const _V*>(this); }
+
+ template <typename _Up>
+@@ -5185,16 +5294,16 @@ template <typename _Tp, typename _Abi>
+
+ // load constructor
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- simd(const _Up* __mem, _Flags)
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
++ simd(const _Up* __mem, _IsSimdFlagType<_Flags>)
+ : _M_data(
+ _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag))
+ {}
+
+ // loads [simd.load]
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE void
+- copy_from(const _Vectorizable<_Up>* __mem, _Flags)
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
++ copy_from(const _Vectorizable<_Up>* __mem, _IsSimdFlagType<_Flags>)
+ {
+ _M_data = static_cast<decltype(_M_data)>(
+ _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag));
+@@ -5202,8 +5311,8 @@ template <typename _Tp, typename _Abi>
+
+ // stores [simd.store]
+ template <typename _Up, typename _Flags>
+- _GLIBCXX_SIMD_ALWAYS_INLINE void
+- copy_to(_Vectorizable<_Up>* __mem, _Flags) const
++ _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
++ copy_to(_Vectorizable<_Up>* __mem, _IsSimdFlagType<_Flags>) const
+ {
+ _Impl::_S_store(_M_data, _Flags::template _S_apply<simd>(__mem),
+ _S_type_tag);
+@@ -5374,7 +5483,7 @@ template <typename _Tp, typename _Abi>
+ }
+
+ private:
+- _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR static mask_type
++ _GLIBCXX_SIMD_INTRINSIC static constexpr mask_type
+ _S_make_mask(typename mask_type::_MemberType __k)
+ { return {__private_init, __k}; }
+
+@@ -5401,26 +5510,17 @@ namespace __float_bitwise_operators { //{{{
+ template <typename _Tp, typename _Ap>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
+ operator^(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
+- {
+- return {__private_init,
+- _Ap::_SimdImpl::_S_bit_xor(__data(__a), __data(__b))};
+- }
++ { return {__private_init, _Ap::_SimdImpl::_S_bit_xor(__data(__a), __data(__b))}; }
+
+ template <typename _Tp, typename _Ap>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
+ operator|(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
+- {
+- return {__private_init,
+- _Ap::_SimdImpl::_S_bit_or(__data(__a), __data(__b))};
+- }
++ { return {__private_init, _Ap::_SimdImpl::_S_bit_or(__data(__a), __data(__b))}; }
+
+ template <typename _Tp, typename _Ap>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
+ operator&(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
+- {
+- return {__private_init,
+- _Ap::_SimdImpl::_S_bit_and(__data(__a), __data(__b))};
+- }
++ { return {__private_init, _Ap::_SimdImpl::_S_bit_and(__data(__a), __data(__b))}; }
+
+ template <typename _Tp, typename _Ap>
+ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
+--- a/src/libstdc++-v3/include/experimental/bits/simd_builtin.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_builtin.h
+@@ -52,7 +52,7 @@ template <typename _V, typename = _VectorTraits<_V>>
+ // Index == -1 requests zeroing of the output element
+ template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+ typename = __detail::__odr_helper>
+- _Tp
++ constexpr _Tp
+ __vector_permute(_Tp __x)
+ {
+ static_assert(sizeof...(_Indices) == _TVT::_S_full_size);
+@@ -65,7 +65,7 @@ template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+ // Index == -1 requests zeroing of the output element
+ template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>,
+ typename = __detail::__odr_helper>
+- _Tp
++ constexpr _Tp
+ __vector_shuffle(_Tp __x, _Tp __y)
+ {
+ return _Tp{(_Indices == -1 ? 0
+@@ -194,15 +194,18 @@ template <unsigned __shift, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+ using _Up = decltype(__w);
+ return __intrin_bitcast<_Tp>(
+ __call_with_n_evaluations<(sizeof(_Tp) - __shift) / __chunksize>(
+- [](auto... __chunks) { return _Up{__chunks...}; },
+- [&](auto __i) { return __w[__shift / __chunksize + __i]; }));
++ [](auto... __chunks) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return _Up{__chunks...};
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __w[__shift / __chunksize + __i];
++ }));
+ }
+ }
+
+ // }}}
+ // __extract_part(_SimdWrapper<_Tp, _Np>) {{{
+ template <int _Index, int _Total, int _Combine, typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST
++ _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr
+ _SimdWrapper<_Tp, _Np / _Total * _Combine>
+ __extract_part(const _SimdWrapper<_Tp, _Np> __x)
+ {
+@@ -225,7 +228,9 @@ template <int _Index, int _Total, int _Combine, typename _Tp, size_t _Np>
+ // by _Total");
+ if (__x._M_is_constprop())
+ return __generate_from_n_evaluations<__return_size, _R>(
+- [&](auto __i) { return __x[__values_to_skip + __i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __x[__values_to_skip + __i];
++ });
+ if constexpr (_Index == 0 && _Total == 1)
+ return __x;
+ else if constexpr (_Index == 0)
+@@ -570,7 +575,9 @@ template <typename _To,
+ constexpr auto _Np
+ = _NParts == 0 ? _FromVT::_S_partial_width - _Offset : _NParts;
+ return __generate_from_n_evaluations<_Np, array<_To, _Np>>(
+- [&](auto __i) { return static_cast<_To>(__v[__i + _Offset]); });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return static_cast<_To>(__v[__i + _Offset]);
++ });
+ }
+ else
+ {
+@@ -611,13 +618,14 @@ template <typename _To,
+ return __vector_bitcast<_FromT, decltype(__n)::value>(__vv);
+ };
+ [[maybe_unused]] const auto __vi = __to_intrin(__v);
+- auto&& __make_array = [](auto __x0, [[maybe_unused]] auto __x1) {
+- if constexpr (_Np == 1)
+- return _R{__intrin_bitcast<_To>(__x0)};
+- else
+- return _R{__intrin_bitcast<_To>(__x0),
+- __intrin_bitcast<_To>(__x1)};
+- };
++ auto&& __make_array
++ = [](auto __x0, [[maybe_unused]] auto __x1) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (_Np == 1)
++ return _R{__intrin_bitcast<_To>(__x0)};
++ else
++ return _R{__intrin_bitcast<_To>(__x0),
++ __intrin_bitcast<_To>(__x1)};
++ };
+
+ if constexpr (_Np == 0)
+ return _R{};
+@@ -642,7 +650,7 @@ template <typename _To,
+ = __convert_all<__vector_type16_t<int>, _Np>(
+ __adjust(_SizeConstant<_Np * 4>(), __v));
+ return __generate_from_n_evaluations<_Np, _R>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return __vector_convert<_To>(__as_wrapper(__ints[__i]));
+ });
+ }
+@@ -687,36 +695,40 @@ template <typename _To,
+ __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[1], __vv[1])),
+ __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[1], __vv[1]))};
+ if constexpr (sizeof(_ToT) == 4)
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- return __vector_convert<_To>(
+- _SimdWrapper<int, 4>(__vvvv[__i] >> 24));
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __vector_convert<_To>(
++ _SimdWrapper<int, 4>(__vvvv[__i] >> 24));
++ });
+ else if constexpr (is_integral_v<_ToT>)
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- const auto __signbits = __to_intrin(__vvvv[__i / 2] >> 31);
+- const auto __sx32 = __to_intrin(__vvvv[__i / 2] >> 24);
+- return __vector_bitcast<_ToT>(
+- __i % 2 == 0 ? _mm_unpacklo_epi32(__sx32, __signbits)
+- : _mm_unpackhi_epi32(__sx32, __signbits));
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ const auto __signbits = __to_intrin(__vvvv[__i / 2] >> 31);
++ const auto __sx32 = __to_intrin(__vvvv[__i / 2] >> 24);
++ return __vector_bitcast<_ToT>(
++ __i % 2 == 0 ? _mm_unpacklo_epi32(__sx32, __signbits)
++ : _mm_unpackhi_epi32(__sx32, __signbits));
++ });
+ else
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- const _SimdWrapper<int, 4> __int4 = __vvvv[__i / 2] >> 24;
+- return __vector_convert<_To>(
+- __i % 2 == 0 ? __int4
+- : _SimdWrapper<int, 4>(
+- _mm_unpackhi_epi64(__to_intrin(__int4),
+- __to_intrin(__int4))));
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ const _SimdWrapper<int, 4> __int4 = __vvvv[__i / 2] >> 24;
++ return __vector_convert<_To>(
++ __i % 2 == 0 ? __int4
++ : _SimdWrapper<int, 4>(
++ _mm_unpackhi_epi64(__to_intrin(__int4),
++ __to_intrin(__int4))));
++ });
+ }
+ else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 4)
+ {
+ const auto __shorts = __convert_all<__vector_type16_t<
+ conditional_t<is_signed_v<_FromT>, short, unsigned short>>>(
+ __adjust(_SizeConstant<(_Np + 1) / 2 * 8>(), __v));
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- return __convert_all<_To>(__shorts[__i / 2])[__i % 2];
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __convert_all<_To>(__shorts[__i / 2])[__i % 2];
++ });
+ }
+ else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 8
+ && is_signed_v<_FromT> && is_integral_v<_ToT>)
+@@ -736,9 +748,10 @@ template <typename _To,
+ __vector_bitcast<int>(
+ _mm_unpackhi_epi32(_mm_srai_epi32(__vv[1], 16),
+ _mm_srai_epi32(__vv[1], 31)))};
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- return __vector_bitcast<_ToT>(__vvvv[__i]);
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __vector_bitcast<_ToT>(__vvvv[__i]);
++ });
+ }
+ else if constexpr (sizeof(_FromT) <= 2 && sizeof(_ToT) == 8)
+ {
+@@ -747,9 +760,10 @@ template <typename _To,
+ is_signed_v<_FromT> || is_floating_point_v<_ToT>, int,
+ unsigned int>>>(
+ __adjust(_SizeConstant<(_Np + 1) / 2 * 4>(), __v));
+- return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
+- return __convert_all<_To>(__ints[__i / 2])[__i % 2];
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __convert_all<_To>(__ints[__i / 2])[__i % 2];
++ });
+ }
+ else
+ __assert_unreachable<_To>();
+@@ -779,14 +793,14 @@ template <typename _To,
+ __extract_part<_Offset, _FromVT::_S_partial_width,
+ _ToVT::_S_full_size>(__v))};
+ else
+- return __generate_from_n_evaluations<_Np, _R>([&](
+- auto __i) constexpr {
+- auto __part
+- = __extract_part<__i * _ToVT::_S_full_size + _Offset,
+- _FromVT::_S_partial_width,
+- _ToVT::_S_full_size>(__v);
+- return __vector_convert<_To>(__part);
+- });
++ return __generate_from_n_evaluations<_Np, _R>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ auto __part
++ = __extract_part<__i * _ToVT::_S_full_size + _Offset,
++ _FromVT::_S_partial_width,
++ _ToVT::_S_full_size>(__v);
++ return __vector_convert<_To>(__part);
++ });
+ }
+ else if constexpr (_Offset == 0)
+ return array<_To, 1>{__vector_convert<_To>(__v)};
+@@ -822,22 +836,19 @@ template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
+ // _SimdBase / base class for simd, providing extra conversions {{{
+ struct _SimdBase2
+ {
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __intrinsic_type_t<_Tp, _Np>() const
+- {
+- return __to_intrin(static_cast<const simd<_Tp, _Abi>*>(this)->_M_data);
+- }
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __vector_type_t<_Tp, _Np>() const
+- {
+- return static_cast<const simd<_Tp, _Abi>*>(this)->_M_data.__builtin();
+- }
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __intrinsic_type_t<_Tp, _Np>() const
++ { return __to_intrin(static_cast<const simd<_Tp, _Abi>*>(this)->_M_data); }
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __vector_type_t<_Tp, _Np>() const
++ { return static_cast<const simd<_Tp, _Abi>*>(this)->_M_data.__builtin(); }
+ };
+
+ struct _SimdBase1
+ {
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __intrinsic_type_t<_Tp, _Np>() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __intrinsic_type_t<_Tp, _Np>() const
+ { return __data(*static_cast<const simd<_Tp, _Abi>*>(this)); }
+ };
+
+@@ -849,23 +860,19 @@ template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
+ // _MaskBase {{{
+ struct _MaskBase2
+ {
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __intrinsic_type_t<_Tp, _Np>() const
+- {
+- return static_cast<const simd_mask<_Tp, _Abi>*>(this)
+- ->_M_data.__intrin();
+- }
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __vector_type_t<_Tp, _Np>() const
+- {
+- return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data._M_data;
+- }
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __intrinsic_type_t<_Tp, _Np>() const
++ { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data.__intrin(); }
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __vector_type_t<_Tp, _Np>() const
++ { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data._M_data; }
+ };
+
+ struct _MaskBase1
+ {
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator __intrinsic_type_t<_Tp, _Np>() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE explicit
++ operator __intrinsic_type_t<_Tp, _Np>() const
+ { return __data(*static_cast<const simd_mask<_Tp, _Abi>*>(this)); }
+ };
+
+@@ -884,6 +891,7 @@ template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
+ public:
+ _GLIBCXX_SIMD_ALWAYS_INLINE
+ _MaskCastType(_Up __x) : _M_data(__x) {}
++
+ _GLIBCXX_SIMD_ALWAYS_INLINE
+ operator _MaskMember() const { return _M_data; }
+ };
+@@ -897,9 +905,10 @@ template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
+ _SimdMember _M_data;
+
+ public:
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdCastType1(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ operator _SimdMember() const { return _M_data; }
+ };
+
+@@ -910,11 +919,13 @@ template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
+ _SimdMember _M_data;
+
+ public:
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdCastType2(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdCastType2(_Bp __b) : _M_data(__b) {}
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ operator _SimdMember() const { return _M_data; }
+ };
+
+@@ -1017,23 +1028,21 @@ template <int _UsedBytes>
+ else
+ {
+ constexpr auto __size = _S_size<_Tp>;
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __r = __generate_vector<_UV>(
+- [](auto __i) constexpr { return __i < __size ? -1 : 0; });
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __r
++ = __generate_vector<_UV>([](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return __i < __size ? -1 : 0; });
+ return __r;
+ }
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr __intrinsic_type_t<_Tp,
+- _S_size<_Tp>>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr __intrinsic_type_t<_Tp, _S_size<_Tp>>
+ _S_implicit_mask_intrin()
+- {
+- return __to_intrin(
+- __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()._M_data));
+- }
++ { return __to_intrin(__vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()._M_data)); }
+
+ template <typename _TW, typename _TVT = _VectorTraits<_TW>>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr _TW _S_masked(_TW __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _TW
++ _S_masked(_TW __x)
+ {
+ using _Tp = typename _TVT::value_type;
+ if constexpr (!_MaskMember<_Tp>::_S_is_partial)
+@@ -1155,8 +1164,7 @@ template <int _UsedBytes>
+ { return __implicit_mask_n<_S_size<_Tp>>(); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<
+- _S_size<_Tp>>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<_S_size<_Tp>>
+ _S_implicit_mask_intrin()
+ { return __implicit_mask_n<_S_size<_Tp>>(); }
+
+@@ -1208,7 +1216,7 @@ template <int _UsedBytes>
+ if constexpr (is_integral_v<typename _TVT::value_type>)
+ return __x
+ | __generate_vector<_Tp, _S_full_size<_Tp>>(
+- [](auto __i) -> _Tp {
++ [](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Tp {
+ if (__i < _Np)
+ return 0;
+ else
+@@ -1288,7 +1296,8 @@ struct _CommonImplBuiltin
+ // }}}
+ // _S_store {{{
+ template <size_t _ReqBytes = 0, typename _TV>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_TV __x, void* __addr)
++ _GLIBCXX_SIMD_INTRINSIC static void
++ _S_store(_TV __x, void* __addr)
+ {
+ constexpr size_t _Bytes = _ReqBytes == 0 ? sizeof(__x) : _ReqBytes;
+ static_assert(sizeof(__x) >= _Bytes);
+@@ -1324,8 +1333,8 @@ struct _CommonImplBuiltin
+ }
+
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __x,
+- void* __addr)
++ _GLIBCXX_SIMD_INTRINSIC static void
++ _S_store(_SimdWrapper<_Tp, _Np> __x, void* __addr)
+ { _S_store<_Np * sizeof(_Tp)>(__x._M_data, __addr); }
+
+ // }}}
+@@ -1336,6 +1345,11 @@ struct _CommonImplBuiltin
+ {
+ if constexpr (_Np == 1)
+ __mem[0] = __x[0];
++ else if (__builtin_is_constant_evaluated())
++ {
++ for (size_t __i = 0; __i < _Np; ++__i)
++ __mem[__i] = __x[__i];
++ }
+ else if constexpr (_Np == 2)
+ {
+ short __bool2 = (__x._M_to_bits() * 0x81) & 0x0101;
+@@ -1348,26 +1362,27 @@ struct _CommonImplBuiltin
+ }
+ else
+ {
+- __execute_n_times<__div_roundup(_Np, 4)>([&](auto __i) {
+- constexpr int __offset = __i * 4;
+- constexpr int __remaining = _Np - __offset;
+- if constexpr (__remaining > 4 && __remaining <= 7)
+- {
+- const _ULLong __bool7
+- = (__x.template _M_extract<__offset>()._M_to_bits()
+- * 0x40810204081ULL)
+- & 0x0101010101010101ULL;
+- _S_store<__remaining>(__bool7, __mem + __offset);
+- }
+- else if constexpr (__remaining >= 4)
+- {
+- int __bits = __x.template _M_extract<__offset>()._M_to_bits();
+- if constexpr (__remaining > 7)
+- __bits &= 0xf;
+- const int __bool4 = (__bits * 0x204081) & 0x01010101;
+- _S_store<4>(__bool4, __mem + __offset);
+- }
+- });
++ __execute_n_times<__div_roundup(_Np, 4)>(
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr int __offset = __i * 4;
++ constexpr int __remaining = _Np - __offset;
++ if constexpr (__remaining > 4 && __remaining <= 7)
++ {
++ const _ULLong __bool7
++ = (__x.template _M_extract<__offset>()._M_to_bits()
++ * 0x40810204081ULL)
++ & 0x0101010101010101ULL;
++ _S_store<__remaining>(__bool7, __mem + __offset);
++ }
++ else if constexpr (__remaining >= 4)
++ {
++ int __bits = __x.template _M_extract<__offset>()._M_to_bits();
++ if constexpr (__remaining > 7)
++ __bits &= 0xf;
++ const int __bool4 = (__bits * 0x204081) & 0x01010101;
++ _S_store<4>(__bool4, __mem + __offset);
++ }
++ });
+ }
+ }
+
+@@ -1414,12 +1429,12 @@ template <typename _Abi, typename>
+
+ // _M_make_simd(_SimdWrapper/__intrinsic_type_t) {{{2
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static simd<_Tp, _Abi>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
+ _M_make_simd(_SimdWrapper<_Tp, _Np> __x)
+ { return {__private_init, __x}; }
+
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static simd<_Tp, _Abi>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
+ _M_make_simd(__intrinsic_type_t<_Tp, _Np> __x)
+ { return {__private_init, __vector_bitcast<_Tp>(__x)}; }
+
+@@ -1431,21 +1446,21 @@ template <typename _Abi, typename>
+
+ // _S_generator {{{2
+ template <typename _Fp, typename _Tp>
+- inline static constexpr _SimdMember<_Tp> _S_generator(_Fp&& __gen,
+- _TypeTag<_Tp>)
++ inline static constexpr _SimdMember<_Tp>
++ _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
+ {
+- return __generate_vector<_Tp, _S_full_size<_Tp>>([&](
+- auto __i) constexpr {
+- if constexpr (__i < _S_size<_Tp>)
+- return __gen(__i);
+- else
+- return 0;
+- });
++ return __generate_vector<_Tp, _S_full_size<_Tp>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (__i < _S_size<_Tp>)
++ return __gen(__i);
++ else
++ return 0;
++ });
+ }
+
+ // _S_load {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdMember<_Tp>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
+ _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
+ {
+ constexpr size_t _Np = _S_size<_Tp>;
+@@ -1454,11 +1469,16 @@ template <typename _Abi, typename>
+ : (is_floating_point_v<_Up> && __have_avx) || __have_avx2 ? 32
+ : 16;
+ constexpr size_t __bytes_to_load = sizeof(_Up) * _Np;
+- if constexpr (sizeof(_Up) > 8)
+- return __generate_vector<_Tp, _SimdMember<_Tp>::_S_full_size>([&](
+- auto __i) constexpr {
+- return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
+- });
++ if (__builtin_is_constant_evaluated())
++ return __generate_vector<_Tp, _S_full_size<_Tp>>(
++ [&](auto __i) constexpr {
++ return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
++ });
++ else if constexpr (sizeof(_Up) > 8)
++ return __generate_vector<_Tp, _SimdMember<_Tp>::_S_full_size>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
++ });
+ else if constexpr (is_same_v<_Up, _Tp>)
+ return _CommonImpl::template _S_load<_Tp, _S_full_size<_Tp>,
+ _Np * sizeof(_Tp)>(__mem);
+@@ -1470,13 +1490,12 @@ template <typename _Abi, typename>
+ constexpr size_t __n_loads = __bytes_to_load / __max_load_size;
+ constexpr size_t __elements_per_load = _Np / __n_loads;
+ return __call_with_n_evaluations<__n_loads>(
+- [](auto... __uncvted) {
+- return __convert<_SimdMember<_Tp>>(__uncvted...);
+- },
+- [&](auto __i) {
+- return _CommonImpl::template _S_load<_Up, __elements_per_load>(
+- __mem + __i * __elements_per_load);
+- });
++ [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __convert<_SimdMember<_Tp>>(__uncvted...);
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return _CommonImpl::template _S_load<_Up, __elements_per_load>(
++ __mem + __i * __elements_per_load);
++ });
+ }
+ else if constexpr (__bytes_to_load % (__max_load_size / 2) == 0
+ && __max_load_size > 16)
+@@ -1485,45 +1504,50 @@ template <typename _Abi, typename>
+ = __bytes_to_load / (__max_load_size / 2);
+ constexpr size_t __elements_per_load = _Np / __n_loads;
+ return __call_with_n_evaluations<__n_loads>(
+- [](auto... __uncvted) {
+- return __convert<_SimdMember<_Tp>>(__uncvted...);
+- },
+- [&](auto __i) {
+- return _CommonImpl::template _S_load<_Up, __elements_per_load>(
+- __mem + __i * __elements_per_load);
+- });
++ [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __convert<_SimdMember<_Tp>>(__uncvted...);
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return _CommonImpl::template _S_load<_Up, __elements_per_load>(
++ __mem + __i * __elements_per_load);
++ });
+ }
+ else // e.g. int[] -> <char, 9>
+ return __call_with_subscripts(
+- __mem, make_index_sequence<_Np>(), [](auto... __args) {
+- return __vector_type_t<_Tp, _S_full_size<_Tp>>{
+- static_cast<_Tp>(__args)...};
+- });
++ __mem, make_index_sequence<_Np>(),
++ [](auto... __args) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __vector_type_t<_Tp, _S_full_size<_Tp>>{static_cast<_Tp>(__args)...};
++ });
+ }
+
+ // _S_masked_load {{{2
+ template <typename _Tp, size_t _Np, typename _Up>
+- static inline _SimdWrapper<_Tp, _Np>
++ static constexpr inline _SimdWrapper<_Tp, _Np>
+ _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
+ const _Up* __mem) noexcept
+ {
+- _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k), [&](auto __i) {
+- __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
+- });
++ _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
++ });
+ return __merge;
+ }
+
+ // _S_store {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_store(_SimdMember<_Tp> __v, _Up* __mem, _TypeTag<_Tp>) noexcept
+ {
+ // TODO: converting int -> "smaller int" can be optimized with AVX512
+ constexpr size_t _Np = _S_size<_Tp>;
+ constexpr size_t __max_store_size
+ = _SuperImpl::template _S_max_store_size<_Up>;
+- if constexpr (sizeof(_Up) > 8)
+- __execute_n_times<_Np>([&](auto __i) constexpr {
++ if (__builtin_is_constant_evaluated())
++ {
++ for (size_t __i = 0; __i < _Np; ++__i)
++ __mem[__i] = __v[__i];
++ }
++ else if constexpr (sizeof(_Up) > 8)
++ __execute_n_times<_Np>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __mem[__i] = __v[__i];
+ });
+ else if constexpr (is_same_v<_Up, _Tp>)
+@@ -1540,9 +1564,10 @@ template <typename _Abi, typename>
+ using _V = __vector_type_t<_Up, __vsize>;
+ const array<_V, __stores> __converted
+ = __convert_all<_V, __stores>(__v);
+- __execute_n_times<__full_stores>([&](auto __i) constexpr {
+- _CommonImpl::_S_store(__converted[__i], __mem + __i * __vsize);
+- });
++ __execute_n_times<__full_stores>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ _CommonImpl::_S_store(__converted[__i], __mem + __i * __vsize);
++ });
+ if constexpr (__full_stores < __stores)
+ _CommonImpl::template _S_store<(_Np - __full_stores * __vsize)
+ * sizeof(_Up)>(
+@@ -1552,12 +1577,12 @@ template <typename _Abi, typename>
+
+ // _S_masked_store_nocvt {{{2
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
+- _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
+- _MaskMember<_Tp> __k)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> __k)
+ {
+ _BitOps::_S_bit_iteration(
+- _MaskImpl::_S_to_bits(__k), [&](auto __i) constexpr {
++ _MaskImpl::_S_to_bits(__k),
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __mem[__i] = __v[__i];
+ });
+ }
+@@ -1565,9 +1590,8 @@ template <typename _Abi, typename>
+ // _S_masked_store {{{2
+ template <typename _TW, typename _TVT = _VectorTraits<_TW>,
+ typename _Tp = typename _TVT::value_type, typename _Up>
+- static inline void
+- _S_masked_store(const _TW __v, _Up* __mem, const _MaskMember<_Tp> __k)
+- noexcept
++ static constexpr inline void
++ _S_masked_store(const _TW __v, _Up* __mem, const _MaskMember<_Tp> __k) noexcept
+ {
+ constexpr size_t _TV_size = _S_size<_Tp>;
+ [[maybe_unused]] const auto __vi = __to_intrin(__v);
+@@ -1579,7 +1603,7 @@ template <typename _Abi, typename>
+ _Up> || (is_integral_v<_Tp> && is_integral_v<_Up> && sizeof(_Tp) == sizeof(_Up)))
+ {
+ // bitwise or no conversion, reinterpret:
+- const _MaskMember<_Up> __kk = [&]() {
++ const _MaskMember<_Up> __kk = [&]() _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr (__is_bitmask_v<decltype(__k)>)
+ return _MaskMember<_Up>(__k._M_data);
+ else
+@@ -1618,7 +1642,7 @@ template <typename _Abi, typename>
+ constexpr size_t _NParts = _S_full_size<_Tp> / _UW_size;
+ const array<_UV, _NAllStores> __converted
+ = __convert_all<_UV, _NAllStores>(__v);
+- __execute_n_times<_NFullStores>([&](auto __i) {
++ __execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ _SuperImpl::_S_masked_store_nocvt(
+ _UW(__converted[__i]), __mem + __i * _UW_size,
+ _UAbi::_MaskImpl::template _S_convert<
+@@ -1637,10 +1661,10 @@ template <typename _Abi, typename>
+ }
+ }
+ else
+- _BitOps::_S_bit_iteration(
+- _MaskImpl::_S_to_bits(__k), [&](auto __i) constexpr {
+- __mem[__i] = static_cast<_Up>(__v[__i]);
+- });
++ _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __mem[__i] = static_cast<_Up>(__v[__i]);
++ });
+ }
+
+ // _S_complement {{{2
+@@ -1794,7 +1818,7 @@ template <typename _Abi, typename>
+ // reductions {{{2
+ template <size_t _Np, size_t... _Is, size_t... _Zeros, typename _Tp,
+ typename _BinaryOperation>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
+ _S_reduce_partial(index_sequence<_Is...>, index_sequence<_Zeros...>,
+ simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
+ {
+@@ -1824,6 +1848,13 @@ template <typename _Abi, typename>
+ else if constexpr (_Np == 2)
+ return __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
+ simd<_Tp, simd_abi::scalar>(__x[1]))[0];
++ else if (__builtin_is_constant_evaluated())
++ {
++ simd<_Tp, simd_abi::scalar> __acc = __x[0];
++ for (size_t __i = 1; __i < _Np; ++__i)
++ __acc = __binary_op(__acc, simd<_Tp, simd_abi::scalar>(__x[__i]));
++ return __acc[0];
++ }
+ else if constexpr (_Abi::template _S_is_partial<_Tp>) //{{{
+ {
+ [[maybe_unused]] constexpr auto __full_size
+@@ -1929,35 +1960,41 @@ template <typename _Abi, typename>
+ // frexp, modf and copysign implemented in simd_math.h
+ #define _GLIBCXX_SIMD_MATH_FALLBACK(__name) \
+ template <typename _Tp, typename... _More> \
+- static _Tp _S_##__name(const _Tp& __x, const _More&... __more) \
++ static _Tp \
++ _S_##__name(const _Tp& __x, const _More&... __more) \
+ { \
+ return __generate_vector<_Tp>( \
+- [&](auto __i) { return __name(__x[__i], __more[__i]...); }); \
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __name(__x[__i], __more[__i]...); \
++ }); \
+ }
+
+ #define _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET(__name) \
+ template <typename _Tp, typename... _More> \
+- static typename _Tp::mask_type _S_##__name(const _Tp& __x, \
+- const _More&... __more) \
++ static typename _Tp::mask_type \
++ _S_##__name(const _Tp& __x, const _More&... __more) \
+ { \
+ return __generate_vector<_Tp>( \
+- [&](auto __i) { return __name(__x[__i], __more[__i]...); }); \
+- }
+-
+-#define _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(_RetTp, __name) \
+- template <typename _Tp, typename... _More> \
+- static auto _S_##__name(const _Tp& __x, const _More&... __more) \
+- { \
+- return __fixed_size_storage_t<_RetTp, \
+- _VectorTraits<_Tp>::_S_partial_width>:: \
+- _S_generate([&](auto __meta) constexpr { \
+- return __meta._S_generator( \
+- [&](auto __i) { \
+- return __name(__x[__meta._S_offset + __i], \
+- __more[__meta._S_offset + __i]...); \
+- }, \
+- static_cast<_RetTp*>(nullptr)); \
+- }); \
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __name(__x[__i], __more[__i]...); \
++ }); \
++ }
++
++#define _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(_RetTp, __name) \
++ template <typename _Tp, typename... _More> \
++ static auto \
++ _S_##__name(const _Tp& __x, const _More&... __more) \
++ { \
++ return __fixed_size_storage_t<_RetTp, \
++ _VectorTraits<_Tp>::_S_partial_width>:: \
++ _S_generate([&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __meta._S_generator( \
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __name(__x[__meta._S_offset + __i], \
++ __more[__meta._S_offset + __i]...); \
++ }, \
++ static_cast<_RetTp*>(nullptr)); \
++ }); \
+ }
+
+ _GLIBCXX_SIMD_MATH_FALLBACK(acos)
+@@ -2010,7 +2047,7 @@ template <typename _Abi, typename>
+ _S_remquo(const _Tp __x, const _Tp __y,
+ __fixed_size_storage_t<int, _TVT::_S_partial_width>* __z)
+ {
+- return __generate_vector<_Tp>([&](auto __i) {
++ return __generate_vector<_Tp>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ int __tmp;
+ auto __r = remquo(__x[__i], __y[__i], &__tmp);
+ __z->_M_set(__i, __tmp);
+@@ -2094,22 +2131,22 @@ template <typename _Abi, typename>
+ #undef _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET
+ // _S_abs {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
+- _S_abs(_SimdWrapper<_Tp, _Np> __x) noexcept
+- {
+- // if (__builtin_is_constant_evaluated())
+- // {
+- // return __x._M_data < 0 ? -__x._M_data : __x._M_data;
+- // }
+- if constexpr (is_floating_point_v<_Tp>)
+- // `v < 0 ? -v : v` cannot compile to the efficient implementation of
+- // masking the signbit off because it must consider v == -0
+-
+- // ~(-0.) & v would be easy, but breaks with fno-signed-zeros
+- return __and(_S_absmask<__vector_type_t<_Tp, _Np>>, __x._M_data);
+- else
+- return __x._M_data < 0 ? -__x._M_data : __x._M_data;
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_abs(_SimdWrapper<_Tp, _Np> __x) noexcept
++ {
++ // if (__builtin_is_constant_evaluated())
++ // {
++ // return __x._M_data < 0 ? -__x._M_data : __x._M_data;
++ // }
++ if constexpr (is_floating_point_v<_Tp>)
++ // `v < 0 ? -v : v` cannot compile to the efficient implementation of
++ // masking the signbit off because it must consider v == -0
++
++ // ~(-0.) & v would be easy, but breaks with fno-signed-zeros
++ return __and(_S_absmask<__vector_type_t<_Tp, _Np>>, __x._M_data);
++ else
++ return __x._M_data < 0 ? -__x._M_data : __x._M_data;
++ }
+
+ // }}}3
+ // _S_plus_minus {{{
+@@ -2117,339 +2154,337 @@ template <typename _Abi, typename>
+ // - _TV must be __vector_type_t<floating-point type, N>.
+ // - _UV must be _TV or floating-point type.
+ template <typename _TV, typename _UV>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr _TV _S_plus_minus(_TV __x,
+- _UV __y) noexcept
+- {
+- #if defined __i386__ && !defined __SSE_MATH__
+- if constexpr (sizeof(__x) == 8)
+- { // operations on __x would use the FPU
+- static_assert(is_same_v<_TV, __vector_type_t<float, 2>>);
+- const auto __x4 = __vector_bitcast<float, 4>(__x);
+- if constexpr (is_same_v<_TV, _UV>)
+- return __vector_bitcast<float, 2>(
+- _S_plus_minus(__x4, __vector_bitcast<float, 4>(__y)));
+- else
+- return __vector_bitcast<float, 2>(_S_plus_minus(__x4, __y));
+- }
+- #endif
+- #if !defined __clang__ && __GCC_IEC_559 == 0
+- if (__builtin_is_constant_evaluated()
+- || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
++ _S_plus_minus(_TV __x, _UV __y) noexcept
++ {
++#if defined __i386__ && !defined __SSE_MATH__
++ if constexpr (sizeof(__x) == 8)
++ { // operations on __x would use the FPU
++ static_assert(is_same_v<_TV, __vector_type_t<float, 2>>);
++ const auto __x4 = __vector_bitcast<float, 4>(__x);
++ if constexpr (is_same_v<_TV, _UV>)
++ return __vector_bitcast<float, 2>(
++ _S_plus_minus(__x4, __vector_bitcast<float, 4>(__y)));
++ else
++ return __vector_bitcast<float, 2>(_S_plus_minus(__x4, __y));
++ }
++#endif
++#if !defined __clang__ && __GCC_IEC_559 == 0
++ if (__builtin_is_constant_evaluated()
++ || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
++ return (__x + __y) - __y;
++ else
++ return [&] {
++ __x += __y;
++ if constexpr(__have_sse)
++ {
++ if constexpr (sizeof(__x) >= 16)
++ asm("" : "+x"(__x));
++ else if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
++ asm("" : "+x"(__x[0]), "+x"(__x[1]));
++ else
++ __assert_unreachable<_TV>();
++ }
++ else if constexpr(__have_neon)
++ asm("" : "+w"(__x));
++ else if constexpr (__have_power_vmx)
++ {
++ if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
++ asm("" : "+fgr"(__x[0]), "+fgr"(__x[1]));
++ else
++ asm("" : "+v"(__x));
++ }
++ else
++ asm("" : "+g"(__x));
++ return __x - __y;
++ }();
++#else
+ return (__x + __y) - __y;
+- else
+- return [&] {
+- __x += __y;
+- if constexpr(__have_sse)
+- {
+- if constexpr (sizeof(__x) >= 16)
+- asm("" : "+x"(__x));
+- else if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
+- asm("" : "+x"(__x[0]), "+x"(__x[1]));
+- else
+- __assert_unreachable<_TV>();
+- }
+- else if constexpr(__have_neon)
+- asm("" : "+w"(__x));
+- else if constexpr (__have_power_vmx)
+- {
+- if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
+- asm("" : "+fgr"(__x[0]), "+fgr"(__x[1]));
+- else
+- asm("" : "+v"(__x));
+- }
+- else
+- asm("" : "+g"(__x));
+- return __x - __y;
+- }();
+- #else
+- return (__x + __y) - __y;
+- #endif
+- }
++#endif
++ }
+
+ // }}}
+ // _S_nearbyint {{{3
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_nearbyint(_Tp __x_) noexcept
+- {
+- using value_type = typename _TVT::value_type;
+- using _V = typename _TVT::type;
+- const _V __x = __x_;
+- const _V __absx = __and(__x, _S_absmask<_V>);
+- static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<value_type>);
+- _GLIBCXX_SIMD_USE_CONSTEXPR _V __shifter_abs
+- = _V() + (1ull << (__digits_v<value_type> - 1));
+- const _V __shifter = __or(__and(_S_signmask<_V>, __x), __shifter_abs);
+- const _V __shifted = _S_plus_minus(__x, __shifter);
+- return __absx < __shifter_abs ? __shifted : __x;
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_nearbyint(_Tp __x_) noexcept
++ {
++ using value_type = typename _TVT::value_type;
++ using _V = typename _TVT::type;
++ const _V __x = __x_;
++ const _V __absx = __and(__x, _S_absmask<_V>);
++ static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<value_type>);
++ _GLIBCXX_SIMD_USE_CONSTEXPR _V __shifter_abs
++ = _V() + (1ull << (__digits_v<value_type> - 1));
++ const _V __shifter = __or(__and(_S_signmask<_V>, __x), __shifter_abs);
++ const _V __shifted = _S_plus_minus(__x, __shifter);
++ return __absx < __shifter_abs ? __shifted : __x;
++ }
+
+ // _S_rint {{{3
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_rint(_Tp __x) noexcept
+- {
+- return _SuperImpl::_S_nearbyint(__x);
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_rint(_Tp __x) noexcept
++ { return _SuperImpl::_S_nearbyint(__x); }
+
+ // _S_trunc {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
+- _S_trunc(_SimdWrapper<_Tp, _Np> __x)
+- {
+- using _V = __vector_type_t<_Tp, _Np>;
+- const _V __absx = __and(__x._M_data, _S_absmask<_V>);
+- static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<_Tp>);
+- constexpr _Tp __shifter = 1ull << (__digits_v<_Tp> - 1);
+- _V __truncated = _S_plus_minus(__absx, __shifter);
+- __truncated -= __truncated > __absx ? _V() + 1 : _V();
+- return __absx < __shifter ? __or(__xor(__absx, __x._M_data), __truncated)
+- : __x._M_data;
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_trunc(_SimdWrapper<_Tp, _Np> __x)
++ {
++ using _V = __vector_type_t<_Tp, _Np>;
++ const _V __absx = __and(__x._M_data, _S_absmask<_V>);
++ static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<_Tp>);
++ constexpr _Tp __shifter = 1ull << (__digits_v<_Tp> - 1);
++ _V __truncated = _S_plus_minus(__absx, __shifter);
++ __truncated -= __truncated > __absx ? _V() + 1 : _V();
++ return __absx < __shifter ? __or(__xor(__absx, __x._M_data), __truncated)
++ : __x._M_data;
++ }
+
+ // _S_round {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
+- _S_round(_SimdWrapper<_Tp, _Np> __x)
+- {
+- const auto __abs_x = _SuperImpl::_S_abs(__x);
+- const auto __t_abs = _SuperImpl::_S_trunc(__abs_x)._M_data;
+- const auto __r_abs // round(abs(x)) =
+- = __t_abs + (__abs_x._M_data - __t_abs >= _Tp(.5) ? _Tp(1) : 0);
+- return __or(__xor(__abs_x._M_data, __x._M_data), __r_abs);
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_round(_SimdWrapper<_Tp, _Np> __x)
++ {
++ const auto __abs_x = _SuperImpl::_S_abs(__x);
++ const auto __t_abs = _SuperImpl::_S_trunc(__abs_x)._M_data;
++ const auto __r_abs // round(abs(x)) =
++ = __t_abs + (__abs_x._M_data - __t_abs >= _Tp(.5) ? _Tp(1) : 0);
++ return __or(__xor(__abs_x._M_data, __x._M_data), __r_abs);
++ }
+
+ // _S_floor {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
+- _S_floor(_SimdWrapper<_Tp, _Np> __x)
+- {
+- const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
+- const auto __negative_input
+- = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
+- const auto __mask
+- = __andnot(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
+- return __or(__andnot(__mask, __y),
+- __and(__mask, __y - __vector_broadcast<_Np, _Tp>(1)));
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_floor(_SimdWrapper<_Tp, _Np> __x)
++ {
++ const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
++ const auto __negative_input
++ = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
++ const auto __mask
++ = __andnot(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
++ return __or(__andnot(__mask, __y),
++ __and(__mask, __y - __vector_broadcast<_Np, _Tp>(1)));
++ }
+
+ // _S_ceil {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
+- _S_ceil(_SimdWrapper<_Tp, _Np> __x)
+- {
+- const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
+- const auto __negative_input
+- = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
+- const auto __inv_mask
+- = __or(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
+- return __or(__and(__inv_mask, __y),
+- __andnot(__inv_mask, __y + __vector_broadcast<_Np, _Tp>(1)));
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_ceil(_SimdWrapper<_Tp, _Np> __x)
++ {
++ const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
++ const auto __negative_input
++ = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
++ const auto __inv_mask
++ = __or(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
++ return __or(__and(__inv_mask, __y),
++ __andnot(__inv_mask, __y + __vector_broadcast<_Np, _Tp>(1)));
++ }
+
+ // _S_isnan {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_isnan([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
+- {
+- #if __FINITE_MATH_ONLY__
+- return {}; // false
+- #elif !defined __SUPPORT_SNAN__
+- return ~(__x._M_data == __x._M_data);
+- #elif defined __STDC_IEC_559__
+- using _Ip = __int_for_sizeof_t<_Tp>;
+- const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
+- const auto __infn
+- = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
+- return __infn < __absn;
+- #else
+- #error "Not implemented: how to support SNaN but non-IEC559 floating-point?"
+- #endif
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_isnan([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
++ {
++#if __FINITE_MATH_ONLY__
++ return {}; // false
++#elif !defined __SUPPORT_SNAN__
++ return ~(__x._M_data == __x._M_data);
++#elif defined __STDC_IEC_559__
++ using _Ip = __int_for_sizeof_t<_Tp>;
++ const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
++ const auto __infn
++ = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
++ return __infn < __absn;
++#else
++#error "Not implemented: how to support SNaN but non-IEC559 floating-point?"
++#endif
++ }
+
+ // _S_isfinite {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_isfinite([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
+- {
+- #if __FINITE_MATH_ONLY__
+- using _UV = typename _MaskMember<_Tp>::_BuiltinType;
+- _GLIBCXX_SIMD_USE_CONSTEXPR _UV __alltrue = ~_UV();
+- return __alltrue;
+- #else
+- // if all exponent bits are set, __x is either inf or NaN
+- using _Ip = __int_for_sizeof_t<_Tp>;
+- const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
+- const auto __maxn
+- = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
+- return __absn <= __maxn;
+- #endif
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_isfinite([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
++ {
++#if __FINITE_MATH_ONLY__
++ using _UV = typename _MaskMember<_Tp>::_BuiltinType;
++ _GLIBCXX_SIMD_USE_CONSTEXPR _UV __alltrue = ~_UV();
++ return __alltrue;
++#else
++ // if all exponent bits are set, __x is either inf or NaN
++ using _Ip = __int_for_sizeof_t<_Tp>;
++ const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
++ const auto __maxn
++ = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
++ return __absn <= __maxn;
++#endif
++ }
+
+ // _S_isunordered {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_isunordered(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
+- {
+- return __or(_S_isnan(__x), _S_isnan(__y));
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_isunordered(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
++ { return __or(_S_isnan(__x), _S_isnan(__y)); }
+
+ // _S_signbit {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_signbit(_SimdWrapper<_Tp, _Np> __x)
+- {
+- using _Ip = __int_for_sizeof_t<_Tp>;
+- return __vector_bitcast<_Ip>(__x) < 0;
+- // Arithmetic right shift (SRA) would also work (instead of compare), but
+- // 64-bit SRA isn't available on x86 before AVX512. And in general,
+- // compares are more likely to be efficient than SRA.
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_signbit(_SimdWrapper<_Tp, _Np> __x)
++ {
++ using _Ip = __int_for_sizeof_t<_Tp>;
++ return __vector_bitcast<_Ip>(__x) < 0;
++ // Arithmetic right shift (SRA) would also work (instead of compare), but
++ // 64-bit SRA isn't available on x86 before AVX512. And in general,
++ // compares are more likely to be efficient than SRA.
++ }
+
+ // _S_isinf {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_isinf([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
+- {
+- #if __FINITE_MATH_ONLY__
+- return {}; // false
+- #else
+- return _SuperImpl::template _S_equal_to<_Tp, _Np>(_SuperImpl::_S_abs(__x),
+- __vector_broadcast<_Np>(
+- __infinity_v<_Tp>));
+- // alternative:
+- // compare to inf using the corresponding integer type
+- /*
+- return
+- __vector_bitcast<_Tp>(__vector_bitcast<__int_for_sizeof_t<_Tp>>(
+- _S_abs(__x)._M_data)
+- ==
+- __vector_bitcast<__int_for_sizeof_t<_Tp>>(__vector_broadcast<_Np>(
+- __infinity_v<_Tp>)));
+- */
+- #endif
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_isinf([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
++ {
++#if __FINITE_MATH_ONLY__
++ return {}; // false
++#else
++ return _SuperImpl::template _S_equal_to<_Tp, _Np>(_SuperImpl::_S_abs(__x),
++ __vector_broadcast<_Np>(
++ __infinity_v<_Tp>));
++ // alternative:
++ // compare to inf using the corresponding integer type
++ /*
++ return
++ __vector_bitcast<_Tp>(__vector_bitcast<__int_for_sizeof_t<_Tp>>(
++ _S_abs(__x)._M_data)
++ ==
++ __vector_bitcast<__int_for_sizeof_t<_Tp>>(__vector_broadcast<_Np>(
++ __infinity_v<_Tp>)));
++ */
++#endif
++ }
+
+ // _S_isnormal {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+- _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
+- {
+- using _Ip = __int_for_sizeof_t<_Tp>;
+- const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
+- const auto __minn
+- = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__norm_min_v<_Tp>));
+- #if __FINITE_MATH_ONLY__
+- return __absn >= __minn;
+- #else
+- const auto __maxn
+- = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
+- return __minn <= __absn && __absn <= __maxn;
+- #endif
+- }
++ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
++ _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
++ {
++ using _Ip = __int_for_sizeof_t<_Tp>;
++ const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
++ const auto __minn
++ = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__norm_min_v<_Tp>));
++#if __FINITE_MATH_ONLY__
++ return __absn >= __minn;
++#else
++ const auto __maxn
++ = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
++ return __minn <= __absn && __absn <= __maxn;
++#endif
++ }
+
+ // _S_fpclassify {{{3
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static __fixed_size_storage_t<int, _Np>
+- _S_fpclassify(_SimdWrapper<_Tp, _Np> __x)
+- {
+- using _I = __int_for_sizeof_t<_Tp>;
+- const auto __xn
+- = __vector_bitcast<_I>(__to_intrin(_SuperImpl::_S_abs(__x)));
+- constexpr size_t _NI = sizeof(__xn) / sizeof(_I);
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __minn
+- = __vector_bitcast<_I>(__vector_broadcast<_NI>(__norm_min_v<_Tp>));
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __infn
+- = __vector_bitcast<_I>(__vector_broadcast<_NI>(__infinity_v<_Tp>));
+-
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_normal
+- = __vector_broadcast<_NI, _I>(FP_NORMAL);
+- #if !__FINITE_MATH_ONLY__
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_nan
+- = __vector_broadcast<_NI, _I>(FP_NAN);
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_infinite
+- = __vector_broadcast<_NI, _I>(FP_INFINITE);
+- #endif
+- #ifndef __FAST_MATH__
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_subnormal
+- = __vector_broadcast<_NI, _I>(FP_SUBNORMAL);
+- #endif
+- _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_zero
+- = __vector_broadcast<_NI, _I>(FP_ZERO);
++ _GLIBCXX_SIMD_INTRINSIC static __fixed_size_storage_t<int, _Np>
++ _S_fpclassify(_SimdWrapper<_Tp, _Np> __x)
++ {
++ using _I = __int_for_sizeof_t<_Tp>;
++ const auto __xn
++ = __vector_bitcast<_I>(__to_intrin(_SuperImpl::_S_abs(__x)));
++ constexpr size_t _NI = sizeof(__xn) / sizeof(_I);
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __minn
++ = __vector_bitcast<_I>(__vector_broadcast<_NI>(__norm_min_v<_Tp>));
++
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_normal
++ = __vector_broadcast<_NI, _I>(FP_NORMAL);
++#if !__FINITE_MATH_ONLY__
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __infn
++ = __vector_bitcast<_I>(__vector_broadcast<_NI>(__infinity_v<_Tp>));
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_nan
++ = __vector_broadcast<_NI, _I>(FP_NAN);
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_infinite
++ = __vector_broadcast<_NI, _I>(FP_INFINITE);
++#endif
++#ifndef __FAST_MATH__
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_subnormal
++ = __vector_broadcast<_NI, _I>(FP_SUBNORMAL);
++#endif
++ _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_zero
++ = __vector_broadcast<_NI, _I>(FP_ZERO);
+
+- __vector_type_t<_I, _NI>
+- __tmp = __xn < __minn
++ __vector_type_t<_I, _NI>
++ __tmp = __xn < __minn
+ #ifdef __FAST_MATH__
+- ? __fp_zero
++ ? __fp_zero
+ #else
+- ? (__xn == 0 ? __fp_zero : __fp_subnormal)
++ ? (__xn == 0 ? __fp_zero : __fp_subnormal)
+ #endif
+ #if __FINITE_MATH_ONLY__
+- : __fp_normal;
++ : __fp_normal;
+ #else
+- : (__xn < __infn ? __fp_normal
+- : (__xn == __infn ? __fp_infinite : __fp_nan));
++ : (__xn < __infn ? __fp_normal
++ : (__xn == __infn ? __fp_infinite : __fp_nan));
+ #endif
+
+- if constexpr (sizeof(_I) == sizeof(int))
+- {
+- using _FixedInt = __fixed_size_storage_t<int, _Np>;
+- const auto __as_int = __vector_bitcast<int, _Np>(__tmp);
+- if constexpr (_FixedInt::_S_tuple_size == 1)
+- return {__as_int};
+- else if constexpr (_FixedInt::_S_tuple_size == 2
+- && is_same_v<
+- typename _FixedInt::_SecondType::_FirstAbi,
+- simd_abi::scalar>)
+- return {__extract<0, 2>(__as_int), __as_int[_Np - 1]};
+- else if constexpr (_FixedInt::_S_tuple_size == 2)
+- return {__extract<0, 2>(__as_int),
+- __auto_bitcast(__extract<1, 2>(__as_int))};
+- else
+- __assert_unreachable<_Tp>();
+- }
+- else if constexpr (_Np == 2 && sizeof(_I) == 8
+- && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 2)
+- {
+- const auto __aslong = __vector_bitcast<_LLong>(__tmp);
+- return {int(__aslong[0]), {int(__aslong[1])}};
+- }
+- #if _GLIBCXX_SIMD_X86INTRIN
+- else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 32
+- && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
+- return {_mm_packs_epi32(__to_intrin(__lo128(__tmp)),
+- __to_intrin(__hi128(__tmp)))};
+- else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 64
+- && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
+- return {_mm512_cvtepi64_epi32(__to_intrin(__tmp))};
+- #endif // _GLIBCXX_SIMD_X86INTRIN
+- else if constexpr (__fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
+- return {__call_with_subscripts<_Np>(__vector_bitcast<_LLong>(__tmp),
+- [](auto... __l) {
+- return __make_wrapper<int>(__l...);
+- })};
+- else
+- __assert_unreachable<_Tp>();
+- }
++ if constexpr (sizeof(_I) == sizeof(int))
++ {
++ using _FixedInt = __fixed_size_storage_t<int, _Np>;
++ const auto __as_int = __vector_bitcast<int, _Np>(__tmp);
++ if constexpr (_FixedInt::_S_tuple_size == 1)
++ return {__as_int};
++ else if constexpr (_FixedInt::_S_tuple_size == 2
++ && is_same_v<
++ typename _FixedInt::_SecondType::_FirstAbi,
++ simd_abi::scalar>)
++ return {__extract<0, 2>(__as_int), __as_int[_Np - 1]};
++ else if constexpr (_FixedInt::_S_tuple_size == 2)
++ return {__extract<0, 2>(__as_int),
++ __auto_bitcast(__extract<1, 2>(__as_int))};
++ else
++ __assert_unreachable<_Tp>();
++ }
++ else if constexpr (_Np == 2 && sizeof(_I) == 8
++ && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 2)
++ {
++ const auto __aslong = __vector_bitcast<_LLong>(__tmp);
++ return {int(__aslong[0]), {int(__aslong[1])}};
++ }
++#if _GLIBCXX_SIMD_X86INTRIN
++ else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 32
++ && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
++ return {_mm_packs_epi32(__to_intrin(__lo128(__tmp)),
++ __to_intrin(__hi128(__tmp)))};
++ else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 64
++ && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
++ return {_mm512_cvtepi64_epi32(__to_intrin(__tmp))};
++#endif // _GLIBCXX_SIMD_X86INTRIN
++ else if constexpr (__fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
++ return {__call_with_subscripts<_Np>(__vector_bitcast<_LLong>(__tmp),
++ [](auto... __l) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __make_wrapper<int>(__l...);
++ })};
++ else
++ __assert_unreachable<_Tp>();
++ }
+
+ // _S_increment & _S_decrement{{{2
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_increment(_SimdWrapper<_Tp, _Np>& __x)
+ { __x = __x._M_data + 1; }
+
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_decrement(_SimdWrapper<_Tp, _Np>& __x)
+ { __x = __x._M_data - 1; }
+
+ // smart_reference access {{{2
+ template <typename _Tp, size_t _Np, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_set(_SimdWrapper<_Tp, _Np>& __v, int __i, _Up&& __x) noexcept
+ { __v._M_set(__i, static_cast<_Up&&>(__x)); }
+
+ // _S_masked_assign{{{2
+ template <typename _Tp, typename _K, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
+ __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
+ {
+@@ -2462,7 +2497,7 @@ template <typename _Abi, typename>
+ }
+
+ template <typename _Tp, typename _K, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
+ __type_identity_t<_Tp> __rhs)
+ {
+@@ -2490,7 +2525,7 @@ template <typename _Abi, typename>
+
+ // _S_masked_cassign {{{2
+ template <typename _Op, typename _Tp, typename _K, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
+ _SimdWrapper<_Tp, _Np>& __lhs,
+ const __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs,
+@@ -2506,7 +2541,7 @@ template <typename _Abi, typename>
+ }
+
+ template <typename _Op, typename _Tp, typename _K, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
+ _SimdWrapper<_Tp, _Np>& __lhs,
+ const __type_identity_t<_Tp> __rhs, _Op __op)
+@@ -2515,7 +2550,7 @@ template <typename _Abi, typename>
+ // _S_masked_unary {{{2
+ template <template <typename> class _Op, typename _Tp, typename _K,
+ size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+ _S_masked_unary(const _SimdWrapper<_K, _Np> __k,
+ const _SimdWrapper<_Tp, _Np> __v)
+ {
+@@ -2525,8 +2560,31 @@ template <typename _Abi, typename>
+ _Op<decltype(__vv)> __op;
+ if (__k._M_is_constprop_all_of())
+ return __data(__op(__vv));
+- else
+- return _CommonImpl::_S_blend(__k, __v, __data(__op(__vv)));
++ else if constexpr (is_same_v<_Op<void>, __increment<void>>)
++ {
++ static_assert(not std::is_same_v<_K, bool>);
++ if constexpr (is_integral_v<_Tp>)
++ // Take a shortcut knowing that __k is an integer vector with values -1 or 0.
++ return __v._M_data - __vector_bitcast<_Tp>(__k._M_data);
++ else if constexpr (not __have_avx2)
++ return __v._M_data
++ + __vector_bitcast<_Tp>(__k._M_data & __builtin_bit_cast(
++ _K, _Tp(1)));
++ // starting with AVX2 it is more efficient to blend after add
++ }
++ else if constexpr (is_same_v<_Op<void>, __decrement<void>>)
++ {
++ static_assert(not std::is_same_v<_K, bool>);
++ if constexpr (is_integral_v<_Tp>)
++ // Take a shortcut knowing that __k is an integer vector with values -1 or 0.
++ return __v._M_data + __vector_bitcast<_Tp>(__k._M_data);
++ else if constexpr (not __have_avx2)
++ return __v._M_data
++ - __vector_bitcast<_Tp>(__k._M_data & __builtin_bit_cast(
++ _K, _Tp(1)));
++ // starting with AVX2 it is more efficient to blend after sub
++ }
++ return _CommonImpl::_S_blend(__k, __v, __data(__op(__vv)));
+ }
+
+ //}}}2
+@@ -2554,13 +2612,13 @@ struct _MaskImplBuiltinMixin
+ _S_to_maskvector(_BitMask<_Np, _Sanitized> __x)
+ {
+ static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
+- return __generate_vector<__vector_type_t<_Up, _ToN>>([&](
+- auto __i) constexpr {
+- if constexpr (__i < _Np)
+- return __x[__i] ? ~_Up() : _Up();
+- else
+- return _Up();
+- });
++ return __generate_vector<__vector_type_t<_Up, _ToN>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (__i < _Np)
++ return __x[__i] ? ~_Up() : _Up();
++ else
++ return _Up();
++ });
+ }
+
+ template <typename _Up, size_t _UpN = 0, typename _Tp, size_t _Np,
+@@ -2601,13 +2659,13 @@ struct _MaskImplBuiltinMixin
+ -1, -1, -1, -1, -1>(__y); else
+ */
+ {
+- return __generate_vector<__vector_type_t<_Up, _ToN>>([&](
+- auto __i) constexpr {
+- if constexpr (__i < _Np)
+- return _Up(__x[__i.value]);
+- else
+- return _Up();
+- });
++ return __generate_vector<__vector_type_t<_Up, _ToN>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (__i < _Np)
++ return _Up(__x[__i.value]);
++ else
++ return _Up();
++ });
+ }
+ }
+ }
+@@ -2625,7 +2683,9 @@ struct _MaskImplBuiltinMixin
+ = __vector_bitcast<_Up>(__x) >> (sizeof(_Up) * __CHAR_BIT__ - 1);
+ _ULLong __r = 0;
+ __execute_n_times<_Np>(
+- [&](auto __i) { __r |= _ULLong(__bools[__i.value]) << __i; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __r |= _ULLong(__bools[__i.value]) << __i;
++ });
+ return __r;
+ }
+
+@@ -2657,10 +2717,7 @@ template <typename _Abi, typename>
+ template <typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
+ _S_broadcast(bool __x)
+- {
+- return __x ? _Abi::template _S_implicit_mask<_Tp>()
+- : _MaskMember<_Tp>();
+- }
++ { return __x ? _Abi::template _S_implicit_mask<_Tp>() : _MaskMember<_Tp>(); }
+
+ // }}}
+ // _S_load {{{
+@@ -2669,17 +2726,18 @@ template <typename _Abi, typename>
+ _S_load(const bool* __mem)
+ {
+ using _I = __int_for_sizeof_t<_Tp>;
+- if constexpr (sizeof(_Tp) == sizeof(bool))
+- {
+- const auto __bools
+- = _CommonImpl::template _S_load<_I, _S_size<_Tp>>(__mem);
+- // bool is {0, 1}, everything else is UB
+- return __bools > 0;
+- }
+- else
+- return __generate_vector<_I, _S_size<_Tp>>([&](auto __i) constexpr {
+- return __mem[__i] ? ~_I() : _I();
+- });
++ if (not __builtin_is_constant_evaluated())
++ if constexpr (sizeof(_Tp) == sizeof(bool))
++ {
++ const auto __bools
++ = _CommonImpl::template _S_load<_I, _S_size<_Tp>>(__mem);
++ // bool is {0, 1}, everything else is UB
++ return __bools > 0;
++ }
++ return __generate_vector<_I, _S_size<_Tp>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __mem[__i] ? ~_I() : _I();
++ });
+ }
+
+ // }}}
+@@ -2752,7 +2810,7 @@ template <typename _Abi, typename>
+ // AVX(2) has 32/64 bit maskload, but nothing at 8 bit granularity
+ auto __tmp = __wrapper_bitcast<__int_for_sizeof_t<_Tp>>(__merge);
+ _BitOps::_S_bit_iteration(_SuperImpl::_S_to_bits(__mask),
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __tmp._M_set(__i, -__mem[__i]);
+ });
+ __merge = __wrapper_bitcast<_Tp>(__tmp);
+@@ -2761,10 +2819,10 @@ template <typename _Abi, typename>
+
+ // _S_store {{{2
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __v,
+- bool* __mem) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(_SimdWrapper<_Tp, _Np> __v, bool* __mem) noexcept
+ {
+- __execute_n_times<_Np>([&](auto __i) constexpr {
++ __execute_n_times<_Np>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __mem[__i] = __v[__i];
+ });
+ }
+@@ -2775,31 +2833,27 @@ template <typename _Abi, typename>
+ _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, bool* __mem,
+ const _SimdWrapper<_Tp, _Np> __k) noexcept
+ {
+- _BitOps::_S_bit_iteration(
+- _SuperImpl::_S_to_bits(__k), [&](auto __i) constexpr {
+- __mem[__i] = __v[__i];
+- });
++ _BitOps::_S_bit_iteration(_SuperImpl::_S_to_bits(__k),
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __mem[__i] = __v[__i];
++ });
+ }
+
+ // _S_from_bitmask{{{2
+ template <size_t _Np, typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
+ _S_from_bitmask(_SanitizedBitMask<_Np> __bits, _TypeTag<_Tp>)
+- {
+- return _SuperImpl::template _S_to_maskvector<_Tp, _S_size<_Tp>>(__bits);
+- }
++ { return _SuperImpl::template _S_to_maskvector<_Tp, _S_size<_Tp>>(__bits); }
+
+ // logical and bitwise operators {{{2
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ { return __and(__x._M_data, __y._M_data); }
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ { return __or(__x._M_data, __y._M_data); }
+
+ template <typename _Tp, size_t _Np>
+@@ -2815,26 +2869,23 @@ template <typename _Abi, typename>
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ { return __and(__x._M_data, __y._M_data); }
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ { return __or(__x._M_data, __y._M_data); }
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ { return __xor(__x._M_data, __y._M_data); }
+
+ // smart_reference access {{{2
+ template <typename _Tp, size_t _Np>
+- static constexpr void _S_set(_SimdWrapper<_Tp, _Np>& __k, int __i,
+- bool __x) noexcept
++ static constexpr void
++ _S_set(_SimdWrapper<_Tp, _Np>& __k, int __i, bool __x) noexcept
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ __k._M_set(__i, __x);
+@@ -2845,7 +2896,7 @@ template <typename _Abi, typename>
+ {
+ __k = __generate_from_n_evaluations<_Np,
+ __vector_type_t<_Tp, _Np>>(
+- [&](auto __j) {
++ [&](auto __j) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if (__i == static_cast<int>(__j))
+ return _Tp(-__x);
+ else
+@@ -2860,15 +2911,13 @@ template <typename _Abi, typename>
+ // _S_masked_assign{{{2
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static void
+- _S_masked_assign(_SimdWrapper<_Tp, _Np> __k,
+- _SimdWrapper<_Tp, _Np>& __lhs,
++ _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
+ __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
+ { __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs); }
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static void
+- _S_masked_assign(_SimdWrapper<_Tp, _Np> __k,
+- _SimdWrapper<_Tp, _Np>& __lhs, bool __rhs)
++ _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs, bool __rhs)
+ {
+ if (__builtin_constant_p(__rhs))
+ {
+@@ -2890,7 +2939,8 @@ template <typename _Abi, typename>
+ {
+ return __call_with_subscripts(
+ __data(__k), make_index_sequence<_S_size<_Tp>>(),
+- [](const auto... __ent) constexpr { return (... && !(__ent == 0)); });
++ [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return (... && !(__ent == 0)); });
+ }
+
+ // }}}
+@@ -2901,7 +2951,8 @@ template <typename _Abi, typename>
+ {
+ return __call_with_subscripts(
+ __data(__k), make_index_sequence<_S_size<_Tp>>(),
+- [](const auto... __ent) constexpr { return (... || !(__ent == 0)); });
++ [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return (... || !(__ent == 0)); });
+ }
+
+ // }}}
+@@ -2912,7 +2963,8 @@ template <typename _Abi, typename>
+ {
+ return __call_with_subscripts(
+ __data(__k), make_index_sequence<_S_size<_Tp>>(),
+- [](const auto... __ent) constexpr { return (... && (__ent == 0)); });
++ [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return (... && (__ent == 0)); });
+ }
+
+ // }}}
+@@ -2945,20 +2997,14 @@ template <typename _Abi, typename>
+ template <typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC static int
+ _S_find_first_set(simd_mask<_Tp, _Abi> __k)
+- {
+- return std::__countr_zero(
+- _SuperImpl::_S_to_bits(__data(__k))._M_to_bits());
+- }
++ { return std::__countr_zero(_SuperImpl::_S_to_bits(__data(__k))._M_to_bits()); }
+
+ // }}}
+ // _S_find_last_set {{{
+ template <typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC static int
+ _S_find_last_set(simd_mask<_Tp, _Abi> __k)
+- {
+- return std::__bit_width(
+- _SuperImpl::_S_to_bits(__data(__k))._M_to_bits()) - 1;
+- }
++ { return std::__bit_width(_SuperImpl::_S_to_bits(__data(__k))._M_to_bits()) - 1; }
+
+ // }}}
+ };
+--- a/src/libstdc++-v3/include/experimental/bits/simd_converter.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_converter.h
+@@ -121,7 +121,7 @@ template <typename _From, typename _To, int _Np>
+ {
+ return __call_with_subscripts(
+ __x, make_index_sequence<_Np>(),
+- [](auto... __values) constexpr->_Ret {
++ [](auto... __values) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Ret {
+ return __make_simd_tuple<_To, decltype((void) __values,
+ simd_abi::scalar())...>(
+ static_cast<_To>(__values)...);
+@@ -233,7 +233,9 @@ template <typename _From, typename _To, int _Np>
+ static_assert(_Ret::_FirstAbi::template _S_is_partial<_To>);
+ return _Ret{__generate_from_n_evaluations<
+ _Np, typename _VectorTraits<typename _Ret::_FirstType>::type>(
+- [&](auto __i) { return static_cast<_To>(__x[__i]); })};
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return static_cast<_To>(__x[__i]);
++ })};
+ }
+ else
+ {
+@@ -241,7 +243,7 @@ template <typename _From, typename _To, int _Np>
+ constexpr auto __n
+ = __div_roundup(_Ret::_S_first_size, _Arg::_S_first_size);
+ return __call_with_n_evaluations<__n>(
+- [&__x](auto... __uncvted) {
++ [&__x](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ // assuming _Arg Abi tags for all __i are _Arg::_FirstAbi
+ _SimdConverter<_From, typename _Arg::_FirstAbi, _To,
+ typename _Ret::_FirstAbi>
+@@ -255,8 +257,9 @@ template <typename _From, typename _To, int _Np>
+ _From, simd_abi::fixed_size<_Np - _Ret::_S_first_size>, _To,
+ simd_abi::fixed_size<_Np - _Ret::_S_first_size>>()(
+ __simd_tuple_pop_front<_Ret::_S_first_size>(__x))};
+- },
+- [&__x](auto __i) { return __get_tuple_at<__i>(__x); });
++ }, [&__x](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __get_tuple_at<__i>(__x);
++ });
+ }
+ }
+ };
+@@ -322,13 +325,14 @@ template <typename _From, int _Np, typename _To, typename _Ap>
+ return __vector_convert<__vector_type_t<_To, _Np>>(__x.first);
+ else if constexpr (_Arg::_S_is_homogeneous)
+ return __call_with_n_evaluations<_Arg::_S_tuple_size>(
+- [](auto... __members) {
++ [](auto... __members) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr ((is_convertible_v<decltype(__members), _To> && ...))
+ return __vector_type_t<_To, _Np>{static_cast<_To>(__members)...};
+ else
+ return __vector_convert<__vector_type_t<_To, _Np>>(__members...);
+- },
+- [&](auto __i) { return __get_tuple_at<__i>(__x); });
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __get_tuple_at<__i>(__x);
++ });
+ else if constexpr (__fixed_size_storage_t<_To, _Np>::_S_tuple_size == 1)
+ {
+ _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To,
+@@ -340,7 +344,7 @@ template <typename _From, int _Np, typename _To, typename _Ap>
+ {
+ const _SimdWrapper<_From, _Np> __xv
+ = __generate_from_n_evaluations<_Np, __vector_type_t<_From, _Np>>(
+- [&](auto __i) { return __x[__i]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
+ return __vector_convert<__vector_type_t<_To, _Np>>(__xv);
+ }
+ }
+--- a/src/libstdc++-v3/include/experimental/bits/simd_detail.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_detail.h
+@@ -254,9 +254,11 @@
+
+ #ifdef __clang__
+ #define _GLIBCXX_SIMD_NORMAL_MATH
++#define _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
+ #else
+ #define _GLIBCXX_SIMD_NORMAL_MATH \
+ [[__gnu__::__optimize__("finite-math-only,no-signed-zeros")]]
++#define _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA __attribute__((__always_inline__))
+ #endif
+ #define _GLIBCXX_SIMD_NEVER_INLINE [[__gnu__::__noinline__]]
+ #define _GLIBCXX_SIMD_INTRINSIC \
+@@ -265,7 +267,7 @@
+ #define _GLIBCXX_SIMD_IS_UNLIKELY(__x) __builtin_expect(__x, 0)
+ #define _GLIBCXX_SIMD_IS_LIKELY(__x) __builtin_expect(__x, 1)
+
+-#if defined __STRICT_ANSI__ && __STRICT_ANSI__
++#if __STRICT_ANSI__ || defined __clang__
+ #define _GLIBCXX_SIMD_CONSTEXPR
+ #define _GLIBCXX_SIMD_USE_CONSTEXPR_API const
+ #else
+@@ -294,6 +296,8 @@
+ #ifdef _GLIBCXX_SIMD_NO_ALWAYS_INLINE
+ #undef _GLIBCXX_SIMD_ALWAYS_INLINE
+ #define _GLIBCXX_SIMD_ALWAYS_INLINE inline
++#undef _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++#define _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
+ #undef _GLIBCXX_SIMD_INTRINSIC
+ #define _GLIBCXX_SIMD_INTRINSIC inline
+ #endif
+--- a/src/libstdc++-v3/include/experimental/bits/simd_fixed_size.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_fixed_size.h
+@@ -55,10 +55,7 @@ template <typename _Tp, typename _A0, typename... _As>
+
+ template <size_t _I, typename _Tp, typename _A0, typename... _As>
+ struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
+- {
+- using type =
+- typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type;
+- };
++ { using type = typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type; };
+
+ template <size_t _I, typename _Tp>
+ using __simd_tuple_element_t = typename __simd_tuple_element<_I, _Tp>::type;
+@@ -80,10 +77,8 @@ template <typename _Tp, typename... _A0s, typename... _A1s>
+ }
+
+ template <typename _Tp, typename _A10, typename... _A1s>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10,
+- _A1s...>
+- __simd_tuple_concat(const _Tp& __left,
+- const _SimdTuple<_Tp, _A10, _A1s...>& __right)
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10, _A1s...>
++ __simd_tuple_concat(const _Tp& __left, const _SimdTuple<_Tp, _A10, _A1s...>& __right)
+ { return {__left, __right}; }
+
+ // }}}
+@@ -112,37 +107,29 @@ struct __as_simd_tuple {};
+
+ template <typename _Tp, typename _A0, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr simd<_Tp, _A0>
+- __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
+- _SizeConstant<0>)
++ __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
+ { return {__private_init, __t.first}; }
+
+ template <typename _Tp, typename _A0, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
+- __simd_tuple_get_impl(__as_simd_tuple,
+- const _SimdTuple<_Tp, _A0, _Abis...>& __t,
++ __simd_tuple_get_impl(__as_simd_tuple, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
+ _SizeConstant<0>)
+ { return __t.first; }
+
+ template <typename _Tp, typename _A0, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto&
+- __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t,
+- _SizeConstant<0>)
++ __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
+ { return __t.first; }
+
+ template <typename _R, size_t _Np, typename _Tp, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto
+- __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t,
+- _SizeConstant<_Np>)
++ __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
+ { return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
+
+ template <size_t _Np, typename _Tp, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto&
+- __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t,
+- _SizeConstant<_Np>)
+- {
+- return __simd_tuple_get_impl(__as_simd_tuple(), __t.second,
+- _SizeConstant<_Np - 1>());
+- }
++ __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
++ { return __simd_tuple_get_impl(__as_simd_tuple(), __t.second, _SizeConstant<_Np - 1>()); }
+
+ template <size_t _Np, typename _Tp, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto
+@@ -154,16 +141,12 @@ template <size_t _Np, typename _Tp, typename... _Abis>
+ template <size_t _Np, typename _Tp, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto
+ __get_tuple_at(const _SimdTuple<_Tp, _Abis...>& __t)
+- {
+- return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
+- }
++ { return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
+
+ template <size_t _Np, typename _Tp, typename... _Abis>
+ _GLIBCXX_SIMD_INTRINSIC constexpr auto&
+ __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
+- {
+- return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
+- }
++ { return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
+
+ // __tuple_element_meta {{{1
+ template <typename _Tp, typename _Abi, size_t _Offset>
+@@ -183,25 +166,25 @@ template <typename _Tp, typename _Abi, size_t _Offset>
+ static constexpr _MaskImpl _S_mask_impl = {};
+
+ template <size_t _Np, bool _Sanitized>
+- _GLIBCXX_SIMD_INTRINSIC static auto
++ _GLIBCXX_SIMD_INTRINSIC static constexpr auto
+ _S_submask(_BitMask<_Np, _Sanitized> __bits)
+ { return __bits.template _M_extract<_Offset, _S_size()>(); }
+
+ template <size_t _Np, bool _Sanitized>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
+ {
+ return _MaskImpl::template _S_convert<_Tp>(
+ __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC static _ULLong
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _ULLong
+ _S_mask_to_shifted_ullong(_MaskMember __k)
+ { return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
+ };
+
+ template <size_t _Offset, typename _Tp, typename _Abi, typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ __tuple_element_meta<_Tp, _Abi, _Offset>
+ __make_meta(const _SimdTuple<_Tp, _Abi, _As...>&)
+ { return {}; }
+@@ -213,17 +196,13 @@ template <size_t _Offset, typename _Base>
+ {
+ static inline constexpr size_t _S_offset = _Offset;
+
+- _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
+- {
+- return reinterpret_cast<char*>(this)
+- + _S_offset * sizeof(typename _Base::value_type);
+- }
++ _GLIBCXX_SIMD_INTRINSIC char*
++ _M_as_charptr()
++ { return reinterpret_cast<char*>(this) + _S_offset * sizeof(typename _Base::value_type); }
+
+- _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
+- {
+- return reinterpret_cast<const char*>(this)
+- + _S_offset * sizeof(typename _Base::value_type);
+- }
++ _GLIBCXX_SIMD_INTRINSIC const char*
++ _M_as_charptr() const
++ { return reinterpret_cast<const char*>(this) + _S_offset * sizeof(typename _Base::value_type); }
+ };
+
+ // make _WithOffset<_WithOffset> ill-formed to use:
+@@ -240,19 +219,13 @@ template <size_t _Offset, typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC
+ decltype(auto)
+ __add_offset(const _Tp& __base)
+- {
+- return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(
+- __base);
+- }
++ { return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(__base); }
+
+ template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC
+ decltype(auto)
+ __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
+- {
+- return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(
+- static_cast<_Tp&>(__base));
+- }
++ { return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(static_cast<_Tp&>(__base)); }
+
+ template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
+ _GLIBCXX_SIMD_INTRINSIC
+@@ -298,7 +271,8 @@ template <typename _FirstType, typename _SecondType>
+ _SecondType second;
+
+ _GLIBCXX_SIMD_INTRINSIC
+- constexpr bool _M_is_constprop() const
++ constexpr bool
++ _M_is_constprop() const
+ {
+ if constexpr (is_class_v<_FirstType>)
+ return first._M_is_constprop() && second._M_is_constprop();
+@@ -314,7 +288,8 @@ template <typename _FirstType, typename _Tp>
+ static constexpr _SimdTuple<_Tp> second = {};
+
+ _GLIBCXX_SIMD_INTRINSIC
+- constexpr bool _M_is_constprop() const
++ constexpr bool
++ _M_is_constprop() const
+ {
+ if constexpr (is_class_v<_FirstType>)
+ return first._M_is_constprop();
+@@ -353,25 +328,31 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ = default;
+
+ template <typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x)
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdTuple(_Up&& __x)
+ : _Base{static_cast<_Up&&>(__x)} {}
+
+ template <typename _Up, typename _Up2>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _Up2&& __y)
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdTuple(_Up&& __x, _Up2&& __y)
+ : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
+
+ template <typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
+ : _Base{static_cast<_Up&&>(__x)} {}
+
+- _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
++ _GLIBCXX_SIMD_INTRINSIC char*
++ _M_as_charptr()
+ { return reinterpret_cast<char*>(this); }
+
+- _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
++ _GLIBCXX_SIMD_INTRINSIC const char*
++ _M_as_charptr() const
+ { return reinterpret_cast<const char*>(this); }
+
+ template <size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC constexpr auto& _M_at()
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto&
++ _M_at()
+ {
+ if constexpr (_Np == 0)
+ return first;
+@@ -380,7 +361,8 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ }
+
+ template <size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC constexpr const auto& _M_at() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
++ _M_at() const
+ {
+ if constexpr (_Np == 0)
+ return first;
+@@ -389,7 +371,8 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ }
+
+ template <size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC constexpr auto _M_simd_at() const
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto
++ _M_simd_at() const
+ {
+ if constexpr (_Np == 0)
+ return simd<_Tp, _Abi0>(__private_init, first);
+@@ -434,14 +417,15 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
+ return __tup.first;
+ else if (__builtin_is_constant_evaluated())
+- return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate([&](
+- auto __meta) constexpr {
+- return __meta._S_generator(
+- [&](auto __i) constexpr { return __tup[__i]; },
+- static_cast<_TupT*>(nullptr));
++ return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate(
++ [&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __meta._S_generator(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __tup[__i];
++ }, static_cast<_TupT*>(nullptr));
+ });
+ else
+- return [&]() {
++ return [&]() { // not always_inline; allow the compiler to decide
+ __fixed_size_storage_t<_TupT, _S_first_size> __r;
+ __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
+ sizeof(__r));
+@@ -515,12 +499,11 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ negation<is_const<remove_reference_t<_More>>>>) )
+ {
+ // need to write back at least one of __more after calling __fun
+- auto&& __first = [&](auto... __args) constexpr
+- {
++ auto&& __first = [&](auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
+ __args...);
+ [[maybe_unused]] auto&& __ignore_me = {(
+- [](auto&& __dst, const auto& __src) {
++ [](auto&& __dst, const auto& __src) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr (is_assignable_v<decltype(__dst),
+ decltype(__dst)>)
+ {
+@@ -530,8 +513,7 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ }(static_cast<_More&&>(__more), __args),
+ 0)...};
+ return __r;
+- }
+- (_M_extract_argument(__more)...);
++ }(_M_extract_argument(__more)...);
+ if constexpr (_S_tuple_size == 1)
+ return {__first};
+ else
+@@ -553,8 +535,8 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ }
+
+ template <typename _R = _Tp, typename _Fp, typename... _More>
+- _GLIBCXX_SIMD_INTRINSIC auto _M_apply_r(_Fp&& __fun,
+- const _More&... __more) const
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto
++ _M_apply_r(_Fp&& __fun, const _More&... __more) const
+ {
+ auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
+ __more.first...);
+@@ -591,51 +573,44 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- _Tp operator[](size_t __i) const noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
++ operator[](size_t __i) const noexcept
+ {
+ if constexpr (_S_tuple_size == 1)
+ return _M_subscript_read(__i);
+- else
+- {
+ #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
+- return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
+-#else
+- if constexpr (__is_scalar_abi<_Abi0>())
+- {
+- const _Tp* ptr = &first;
+- return ptr[__i];
+- }
+- else
+- return __i < simd_size_v<_Tp, _Abi0>
+- ? _M_subscript_read(__i)
+- : second[__i - simd_size_v<_Tp, _Abi0>];
++ else if (not __builtin_is_constant_evaluated())
++ return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
+ #endif
++ else if constexpr (__is_scalar_abi<_Abi0>())
++ {
++ const _Tp* ptr = &first;
++ return ptr[__i];
+ }
++ else
++ return __i < simd_size_v<_Tp, _Abi0> ? _M_subscript_read(__i)
++ : second[__i - simd_size_v<_Tp, _Abi0>];
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- void _M_set(size_t __i, _Tp __val) noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr void
++ _M_set(size_t __i, _Tp __val) noexcept
+ {
+ if constexpr (_S_tuple_size == 1)
+ return _M_subscript_write(__i, __val);
+- else
+- {
+ #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
+- reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
+-#else
+- if (__i < simd_size_v<_Tp, _Abi0>)
+- _M_subscript_write(__i, __val);
+- else
+- second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
++ else if (not __builtin_is_constant_evaluated())
++ reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
+ #endif
+- }
++ else if (__i < simd_size_v<_Tp, _Abi0>)
++ _M_subscript_write(__i, __val);
++ else
++ second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
+ }
+
+ private:
+ // _M_subscript_read/_write {{{
+- _GLIBCXX_SIMD_INTRINSIC
+- _Tp _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
++ _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
+ {
+ if constexpr (__is_vectorizable_v<_FirstType>)
+ return first;
+@@ -643,8 +618,8 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+ return first[__i];
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- void _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
++ _GLIBCXX_SIMD_INTRINSIC constexpr void
++ _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
+ {
+ if constexpr (__is_vectorizable_v<_FirstType>)
+ first = __y;
+@@ -657,22 +632,22 @@ template <typename _Tp, typename _Abi0, typename... _Abis>
+
+ // __make_simd_tuple {{{1
+ template <typename _Tp, typename _A0>
+- _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0>
+ __make_simd_tuple(simd<_Tp, _A0> __x0)
+ { return {__data(__x0)}; }
+
+ template <typename _Tp, typename _A0, typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0, _As...>
+ __make_simd_tuple(const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
+ { return {__data(__x0), __make_simd_tuple(__xs...)}; }
+
+ template <typename _Tp, typename _A0>
+- _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0>
+ __make_simd_tuple(const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
+ { return {__arg0}; }
+
+ template <typename _Tp, typename _A0, typename _A1, typename... _Abis>
+- _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0, _A1, _Abis...>
+ __make_simd_tuple(
+ const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
+ const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
+@@ -688,8 +663,7 @@ template <typename _Tp, size_t _Np,
+ size_t _Offset = 0, // skip this many elements in __from0
+ typename _R = __fixed_size_storage_t<_Tp, _Np>, typename _V0,
+ typename _V0VT = _VectorTraits<_V0>, typename... _VX>
+- _GLIBCXX_SIMD_INTRINSIC _R constexpr __to_simd_tuple(const _V0 __from0,
+- const _VX... __fromX)
++ _GLIBCXX_SIMD_INTRINSIC _R constexpr __to_simd_tuple(const _V0 __from0, const _VX... __fromX)
+ {
+ static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
+ static_assert(_Offset < _V0VT::_S_full_size);
+@@ -776,18 +750,18 @@ template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
+ sizeof...(_VX) == 0,
+ "An array of scalars must be the last argument to __to_simd_tuple");
+ return __call_with_subscripts(
+- __from,
+- make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
+- return __simd_tuple_concat(
+- _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
+- });
++ __from, make_index_sequence<_NV>(),
++ [&](const auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __simd_tuple_concat(
++ _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
++ });
+ }
+ else
+ return __call_with_subscripts(
+- __from,
+- make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
+- return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
+- });
++ __from, make_index_sequence<_NV>(),
++ [&](const auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
++ });
+ }
+
+ template <size_t, typename _Tp>
+@@ -816,19 +790,19 @@ template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
+
+ // __optimize_simd_tuple {{{1
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
++ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp>
+ __optimize_simd_tuple(const _SimdTuple<_Tp>)
+ { return {}; }
+
+ template <typename _Tp, typename _Ap>
+- _GLIBCXX_SIMD_INTRINSIC const _SimdTuple<_Tp, _Ap>&
++ _GLIBCXX_SIMD_INTRINSIC constexpr const _SimdTuple<_Tp, _Ap>&
+ __optimize_simd_tuple(const _SimdTuple<_Tp, _Ap>& __x)
+ { return __x; }
+
+ template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
+ typename _R = __fixed_size_storage_t<
+ _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
+- _GLIBCXX_SIMD_INTRINSIC _R
++ _GLIBCXX_SIMD_INTRINSIC constexpr _R
+ __optimize_simd_tuple(const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
+ {
+ using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
+@@ -841,7 +815,7 @@ template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
+ || _A0::template _S_is_partial<_Tp>)
+ return {__generate_from_n_evaluations<_R::_S_first_size,
+ typename _R::_FirstType>(
+- [&](auto __i) { return __x[__i]; }),
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; }),
+ __optimize_simd_tuple(
+ __simd_tuple_pop_front<_R::_S_first_size>(__x))};
+ else if constexpr (is_same_v<_A0, _A1>
+@@ -901,11 +875,8 @@ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
+ // __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
+ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
+ _GLIBCXX_SIMD_INTRINSIC constexpr void
+- __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
+- _Fp&& __fun)
+- {
+- static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
+- }
++ __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
++ { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
+
+ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
+ typename... _As, typename _Fp>
+@@ -921,11 +892,8 @@ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
+ // __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
+ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
+ _GLIBCXX_SIMD_INTRINSIC constexpr void
+- __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
+- _Fp&& __fun)
+- {
+- static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
+- }
++ __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
++ { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
+
+ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
+ typename... _As, typename _Fp>
+@@ -940,9 +908,8 @@ template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
+
+ // }}}1
+ // __extract_part(_SimdTuple) {{{
+-template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
+- typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC auto // __vector_type_t or _SimdTuple
++template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0, typename... _As>
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto // __vector_type_t or _SimdTuple
+ __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x)
+ {
+ // worst cases:
+@@ -994,10 +961,11 @@ template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
+ return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
+ #else
+ [[maybe_unused]] constexpr size_t __offset = __values_to_skip;
+- return __as_vector(simd<_Tp, _RetAbi>([&](auto __i) constexpr {
+- constexpr _SizeConstant<__i + __offset> __k;
+- return __x[__k];
+- }));
++ return __as_vector(simd<_Tp, _RetAbi>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ constexpr _SizeConstant<__i + __offset> __k;
++ return __x[__k];
++ }));
+ #endif
+ }
+
+@@ -1042,11 +1010,11 @@ template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
+ _Tp _M_data;
+ using _TT = __remove_cvref_t<_Tp>;
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator _TT()
+ { return _M_data; }
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator _TT&()
+ {
+ static_assert(is_lvalue_reference<_Tp>::value, "");
+@@ -1054,7 +1022,7 @@ template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
+ return _M_data;
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator _TT*()
+ {
+ static_assert(is_lvalue_reference<_Tp>::value, "");
+@@ -1062,29 +1030,23 @@ template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
+ return &_M_data;
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
++ _GLIBCXX_SIMD_INTRINSIC constexpr
++ __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
+
+ template <typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator simd<typename _TT::value_type, _Abi>()
+ { return {__private_init, _M_data}; }
+
+ template <typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator simd<typename _TT::value_type, _Abi>&()
+- {
+- return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
+- &_M_data);
+- }
++ { return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(&_M_data); }
+
+ template <typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator simd<typename _TT::value_type, _Abi>*()
+- {
+- return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
+- &_M_data);
+- }
++ { return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(&_M_data); }
+ };
+
+ template <typename _Tp>
+@@ -1104,11 +1066,11 @@ template <typename _Tp>
+ ~__autocvt_to_simd()
+ { _M_data = __data(_M_fd).first; }
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator fixed_size_simd<_TT, 1>()
+ { return _M_fd; }
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator fixed_size_simd<_TT, 1> &()
+ {
+ static_assert(is_lvalue_reference<_Tp>::value, "");
+@@ -1116,7 +1078,7 @@ template <typename _Tp>
+ return _M_fd;
+ }
+
+- _GLIBCXX_SIMD_INTRINSIC
++ _GLIBCXX_SIMD_INTRINSIC constexpr
+ operator fixed_size_simd<_TT, 1> *()
+ {
+ static_assert(is_lvalue_reference<_Tp>::value, "");
+@@ -1193,16 +1155,17 @@ template <int _Np>
+ {
+ // The following ensures, function arguments are passed via the stack.
+ // This is important for ABI compatibility across TU boundaries
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdBase(const _SimdBase&) {}
++
+ _SimdBase() = default;
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator const _SimdMember &() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr explicit
++ operator const _SimdMember &() const
+ { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }
+
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator array<_Tp, _Np>() const
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr explicit
++ operator array<_Tp, _Np>() const
+ {
+ array<_Tp, _Np> __r;
+ // _SimdMember can be larger because of higher alignment
+@@ -1222,12 +1185,14 @@ template <int _Np>
+ // _SimdCastType {{{
+ struct _SimdCastType
+ {
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdCastType(const array<_Tp, _Np>&);
+- _GLIBCXX_SIMD_ALWAYS_INLINE
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
+ _SimdCastType(const _SimdMember& dd) : _M_data(dd) {}
+- _GLIBCXX_SIMD_ALWAYS_INLINE
+- explicit operator const _SimdMember &() const { return _M_data; }
++
++ _GLIBCXX_SIMD_ALWAYS_INLINE constexpr explicit
++ operator const _SimdMember &() const { return _M_data; }
+
+ private:
+ const _SimdMember& _M_data;
+@@ -1284,52 +1249,56 @@ template <int _Np, typename>
+
+ // broadcast {{{2
+ template <typename _Tp>
+- static constexpr inline _SimdMember<_Tp> _S_broadcast(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
++ _S_broadcast(_Tp __x) noexcept
+ {
+- return _SimdMember<_Tp>::_S_generate([&](auto __meta) constexpr {
+- return __meta._S_broadcast(__x);
+- });
++ return _SimdMember<_Tp>::_S_generate(
++ [&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __meta._S_broadcast(__x);
++ });
+ }
+
+ // _S_generator {{{2
+ template <typename _Fp, typename _Tp>
+- static constexpr inline _SimdMember<_Tp> _S_generator(_Fp&& __gen,
+- _TypeTag<_Tp>)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
++ _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
+ {
+- return _SimdMember<_Tp>::_S_generate([&__gen](auto __meta) constexpr {
+- return __meta._S_generator(
+- [&](auto __i) constexpr {
+- return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
+- : 0;
+- },
+- _TypeTag<_Tp>());
+- });
++ return _SimdMember<_Tp>::_S_generate(
++ [&__gen](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __meta._S_generator(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
++ : 0;
++ },
++ _TypeTag<_Tp>());
++ });
+ }
+
+ // _S_load {{{2
+ template <typename _Tp, typename _Up>
+- static inline _SimdMember<_Tp> _S_load(const _Up* __mem,
+- _TypeTag<_Tp>) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
++ _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
+ {
+- return _SimdMember<_Tp>::_S_generate([&](auto __meta) {
+- return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
+- });
++ return _SimdMember<_Tp>::_S_generate(
++ [&](auto __meta) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
++ });
+ }
+
+ // _S_masked_load {{{2
+ template <typename _Tp, typename... _As, typename _Up>
+- static inline _SimdTuple<_Tp, _As...>
++ _GLIBCXX_SIMD_INTRINSIC static _SimdTuple<_Tp, _As...>
+ _S_masked_load(const _SimdTuple<_Tp, _As...>& __old,
+ const _MaskMember __bits, const _Up* __mem) noexcept
+ {
+ auto __merge = __old;
+- __for_each(__merge, [&](auto __meta, auto& __native) {
++ __for_each(__merge, [&](auto __meta, auto& __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if (__meta._S_submask(__bits).any())
+ #pragma GCC diagnostic push
+- // __mem + __mem._S_offset could be UB ([expr.add]/4.3, but it punts
+- // the responsibility for avoiding UB to the caller of the masked load
+- // via the mask. Consequently, the compiler may assume this branch is
+- // unreachable, if the pointer arithmetic is UB.
++ // Dereferencing __mem + __meta._S_offset could be UB ([expr.add]/4.3).
++ // It is the responsibility of the caller of the masked load (via the mask's value) to
++ // avoid UB. Consequently, the compiler may assume this branch is unreachable, if the
++ // pointer arithmetic is UB.
+ #pragma GCC diagnostic ignored "-Warray-bounds"
+ __native
+ = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
+@@ -1341,21 +1310,21 @@ template <int _Np, typename>
+
+ // _S_store {{{2
+ template <typename _Tp, typename _Up>
+- static inline void _S_store(const _SimdMember<_Tp>& __v, _Up* __mem,
+- _TypeTag<_Tp>) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(const _SimdMember<_Tp>& __v, _Up* __mem, _TypeTag<_Tp>) noexcept
+ {
+- __for_each(__v, [&](auto __meta, auto __native) {
++ __for_each(__v, [&](auto __meta, auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
+ });
+ }
+
+ // _S_masked_store {{{2
+ template <typename _Tp, typename... _As, typename _Up>
+- static inline void _S_masked_store(const _SimdTuple<_Tp, _As...>& __v,
+- _Up* __mem,
+- const _MaskMember __bits) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static void
++ _S_masked_store(const _SimdTuple<_Tp, _As...>& __v, _Up* __mem,
++ const _MaskMember __bits) noexcept
+ {
+- __for_each(__v, [&](auto __meta, auto __native) {
++ __for_each(__v, [&](auto __meta, auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if (__meta._S_submask(__bits).any())
+ #pragma GCC diagnostic push
+ // __mem + __mem._S_offset could be UB ([expr.add]/4.3, but it punts
+@@ -1371,12 +1340,12 @@ template <int _Np, typename>
+
+ // negation {{{2
+ template <typename _Tp, typename... _As>
+- static inline _MaskMember
++ static constexpr inline _MaskMember
+ _S_negate(const _SimdTuple<_Tp, _As...>& __x) noexcept
+ {
+ _MaskMember __bits = 0;
+ __for_each(
+- __x, [&__bits](auto __meta, auto __native) constexpr {
++ __x, [&__bits](auto __meta, auto __native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __bits
+ |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
+ });
+@@ -1414,7 +1383,7 @@ template <int _Np, typename>
+ {
+ const auto& __x2 = __call_with_n_evaluations<
+ __div_roundup(_Tup::_S_tuple_size, 2)>(
+- [](auto __first_simd, auto... __remaining) {
++ [](auto __first_simd, auto... __remaining) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr (sizeof...(__remaining) == 0)
+ return __first_simd;
+ else
+@@ -1428,7 +1397,7 @@ template <int _Np, typename>
+ __make_simd_tuple(__first_simd, __remaining...));
+ }
+ },
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ auto __left = __tup.template _M_simd_at<2 * __i>();
+ if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
+ return __left;
+@@ -1444,7 +1413,9 @@ template <int _Np, typename>
+ _GLIBCXX_SIMD_USE_CONSTEXPR_API
+ typename _LT::mask_type __k(
+ __private_init,
+- [](auto __j) constexpr { return __j < _RT::size(); });
++ [](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __j < _RT::size();
++ });
+ _LT __ext_right = __left;
+ where(__k, __ext_right)
+ = __proposed::resizing_simd_cast<_LT>(__right);
+@@ -1459,24 +1430,22 @@ template <int _Np, typename>
+
+ // _S_min, _S_max {{{2
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
+- _S_min(const _SimdTuple<_Tp, _As...>& __a,
+- const _SimdTuple<_Tp, _As...>& __b)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
++ _S_min(const _SimdTuple<_Tp, _As...>& __a, const _SimdTuple<_Tp, _As...>& __b)
+ {
+ return __a._M_apply_per_chunk(
+- [](auto __impl, auto __aa, auto __bb) constexpr {
++ [](auto __impl, auto __aa, auto __bb) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return __impl._S_min(__aa, __bb);
+ },
+ __b);
+ }
+
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
+- _S_max(const _SimdTuple<_Tp, _As...>& __a,
+- const _SimdTuple<_Tp, _As...>& __b)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
++ _S_max(const _SimdTuple<_Tp, _As...>& __a, const _SimdTuple<_Tp, _As...>& __b)
+ {
+ return __a._M_apply_per_chunk(
+- [](auto __impl, auto __aa, auto __bb) constexpr {
++ [](auto __impl, auto __aa, auto __bb) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return __impl._S_max(__aa, __bb);
+ },
+ __b);
+@@ -1484,36 +1453,38 @@ template <int _Np, typename>
+
+ // _S_complement {{{2
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
+ _S_complement(const _SimdTuple<_Tp, _As...>& __x) noexcept
+ {
+- return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
+- return __impl._S_complement(__xx);
+- });
++ return __x._M_apply_per_chunk(
++ [](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __impl._S_complement(__xx);
++ });
+ }
+
+ // _S_unary_minus {{{2
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
+ _S_unary_minus(const _SimdTuple<_Tp, _As...>& __x) noexcept
+ {
+- return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
+- return __impl._S_unary_minus(__xx);
+- });
++ return __x._M_apply_per_chunk(
++ [](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __impl._S_unary_minus(__xx);
++ });
+ }
+
+ // arithmetic operators {{{2
+
+-#define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
+- template <typename _Tp, typename... _As> \
+- static inline constexpr _SimdTuple<_Tp, _As...> name_( \
+- const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)\
+- { \
+- return __x._M_apply_per_chunk( \
+- [](auto __impl, auto __xx, auto __yy) constexpr { \
+- return __impl.name_(__xx, __yy); \
+- }, \
+- __y); \
++#define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
++ template <typename _Tp, typename... _As> \
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...> name_( \
++ const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y) \
++ { \
++ return __x._M_apply_per_chunk( \
++ [](auto __impl, auto __xx, auto __yy) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __impl.name_(__xx, __yy); \
++ }, \
++ __y); \
+ }
+
+ _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
+@@ -1529,21 +1500,23 @@ template <int _Np, typename>
+ #undef _GLIBCXX_SIMD_FIXED_OP
+
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
+ _S_bit_shift_left(const _SimdTuple<_Tp, _As...>& __x, int __y)
+ {
+- return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
+- return __impl._S_bit_shift_left(__xx, __y);
+- });
++ return __x._M_apply_per_chunk(
++ [__y](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __impl._S_bit_shift_left(__xx, __y);
++ });
+ }
+
+ template <typename _Tp, typename... _As>
+- static inline constexpr _SimdTuple<_Tp, _As...>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...>
+ _S_bit_shift_right(const _SimdTuple<_Tp, _As...>& __x, int __y)
+ {
+- return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
+- return __impl._S_bit_shift_right(__xx, __y);
+- });
++ return __x._M_apply_per_chunk(
++ [__y](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __impl._S_bit_shift_right(__xx, __y);
++ });
+ }
+
+ // math {{{2
+@@ -1557,35 +1530,40 @@ template <int _Np, typename>
+ { \
+ if constexpr (is_same_v<_Tp, _RetTp>) \
+ return __x._M_apply_per_chunk( \
+- [](auto __impl, auto __xx) constexpr { \
+- using _V = typename decltype(__impl)::simd_type; \
+- return __data(__name(_V(__private_init, __xx))); \
+- }); \
++ [](auto __impl, auto __xx) \
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { \
++ using _V = typename decltype(__impl)::simd_type; \
++ return __data(__name(_V(__private_init, __xx))); \
++ }); \
+ else \
+ return __optimize_simd_tuple( \
+- __x.template _M_apply_r<_RetTp>([](auto __impl, auto __xx) { \
+- return __impl._S_##__name(__xx); \
+- })); \
++ __x.template _M_apply_r<_RetTp>( \
++ [](auto __impl, auto __xx) \
++ _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { return __impl._S_##__name(__xx); })); \
+ } \
+ else if constexpr ( \
+ is_same_v< \
+ _Tp, \
+ _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
+ return __x._M_apply_per_chunk( \
+- [](auto __impl, auto __xx, auto... __pack) constexpr { \
+- using _V = typename decltype(__impl)::simd_type; \
+- return __data(__name(_V(__private_init, __xx), \
+- _V(__private_init, __pack)...)); \
+- }, \
+- __more...); \
++ [](auto __impl, auto __xx, auto... __pack) \
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { \
++ using _V = typename decltype(__impl)::simd_type; \
++ return __data(__name(_V(__private_init, __xx), \
++ _V(__private_init, __pack)...)); \
++ }, __more...); \
+ else if constexpr (is_same_v<_Tp, _RetTp>) \
+ return __x._M_apply_per_chunk( \
+- [](auto __impl, auto __xx, auto... __pack) constexpr { \
+- using _V = typename decltype(__impl)::simd_type; \
+- return __data(__name(_V(__private_init, __xx), \
+- __autocvt_to_simd(__pack)...)); \
+- }, \
+- __more...); \
++ [](auto __impl, auto __xx, auto... __pack) \
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { \
++ using _V = typename decltype(__impl)::simd_type; \
++ return __data(__name(_V(__private_init, __xx), \
++ __autocvt_to_simd(__pack)...)); \
++ }, __more...); \
+ else \
+ __assert_unreachable<_Tp>(); \
+ }
+@@ -1651,16 +1629,15 @@ template <int _Np, typename>
+ #undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
+
+ template <typename _Tp, typename... _Abis>
+- static _SimdTuple<_Tp, _Abis...> _S_remquo(
+- const _SimdTuple<_Tp, _Abis...>& __x,
+- const _SimdTuple<_Tp, _Abis...>& __y,
+- __fixed_size_storage_t<int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
++ static inline _SimdTuple<_Tp, _Abis...>
++ _S_remquo(const _SimdTuple<_Tp, _Abis...>& __x, const _SimdTuple<_Tp, _Abis...>& __y,
++ __fixed_size_storage_t<int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
+ {
+ return __x._M_apply_per_chunk(
+- [](auto __impl, const auto __xx, const auto __yy, auto& __zz) {
+- return __impl._S_remquo(__xx, __yy, &__zz);
+- },
+- __y, *__z);
++ [](auto __impl, const auto __xx, const auto __yy, auto& __zz)
++ _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ { return __impl._S_remquo(__xx, __yy, &__zz); },
++ __y, *__z);
+ }
+
+ template <typename _Tp, typename... _As>
+@@ -1669,22 +1646,20 @@ template <int _Np, typename>
+ __fixed_size_storage_t<int, _Np>& __exp) noexcept
+ {
+ return __x._M_apply_per_chunk(
+- [](auto __impl, const auto& __a, auto& __b) {
+- return __data(
+- frexp(typename decltype(__impl)::simd_type(__private_init, __a),
+- __autocvt_to_simd(__b)));
+- },
+- __exp);
++ [](auto __impl, const auto& __a, auto& __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __data(frexp(typename decltype(__impl)::simd_type(__private_init, __a),
++ __autocvt_to_simd(__b)));
++ }, __exp);
+ }
+
+-#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
+- template <typename _Tp, typename... _As> \
+- static inline _MaskMember \
+- _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
+- { \
+- return _M_test([](auto __impl, \
+- auto __xx) { return __impl._S_##name_(__xx); }, \
+- __x); \
++#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
++ template <typename _Tp, typename... _As> \
++ static inline _MaskMember \
++ _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
++ { \
++ return _M_test([] (auto __impl, auto __xx) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
++ return __impl._S_##name_(__xx); \
++ }, __x); \
+ }
+
+ _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
+@@ -1700,7 +1675,7 @@ template <int _Np, typename>
+ _S_increment(_SimdTuple<_Ts...>& __x)
+ {
+ __for_each(
+- __x, [](auto __meta, auto& native) constexpr {
++ __x, [](auto __meta, auto& native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __meta._S_increment(native);
+ });
+ }
+@@ -1710,7 +1685,7 @@ template <int _Np, typename>
+ _S_decrement(_SimdTuple<_Ts...>& __x)
+ {
+ __for_each(
+- __x, [](auto __meta, auto& native) constexpr {
++ __x, [](auto __meta, auto& native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __meta._S_decrement(native);
+ });
+ }
+@@ -1718,15 +1693,14 @@ template <int _Np, typename>
+ // compares {{{2
+ #define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp) \
+ template <typename _Tp, typename... _As> \
+- _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember \
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember \
+ __cmp(const _SimdTuple<_Tp, _As...>& __x, \
+ const _SimdTuple<_Tp, _As...>& __y) \
+ { \
+- return _M_test( \
+- [](auto __impl, auto __xx, auto __yy) constexpr { \
+- return __impl.__cmp(__xx, __yy); \
+- }, \
+- __x, __y); \
++ return _M_test([](auto __impl, auto __xx, auto __yy) \
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
++ { return __impl.__cmp(__xx, __yy); }, \
++ __x, __y); \
+ }
+
+ _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
+@@ -1743,33 +1717,34 @@ template <int _Np, typename>
+
+ // smart_reference access {{{2
+ template <typename _Tp, typename... _As, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_set(_SimdTuple<_Tp, _As...>& __v,
+- int __i, _Up&& __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_set(_SimdTuple<_Tp, _As...>& __v, int __i, _Up&& __x) noexcept
+ { __v._M_set(__i, static_cast<_Up&&>(__x)); }
+
+ // _S_masked_assign {{{2
+ template <typename _Tp, typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
+ const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
+ {
+- __for_each(
+- __lhs, __rhs,
+- [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
+- __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
+- __native_rhs);
+- });
++ __for_each(__lhs, __rhs,
++ [&](auto __meta, auto& __native_lhs, auto __native_rhs)
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ {
++ __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
++ __native_rhs);
++ });
+ }
+
+ // Optimization for the case where the RHS is a scalar. No need to broadcast
+ // the scalar to a simd first.
+ template <typename _Tp, typename... _As>
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
+ const __type_identity_t<_Tp> __rhs)
+ {
+ __for_each(
+- __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
++ __lhs, [&](auto __meta, auto& __native_lhs) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
+ __rhs);
+ });
+@@ -1777,28 +1752,28 @@ template <int _Np, typename>
+
+ // _S_masked_cassign {{{2
+ template <typename _Op, typename _Tp, typename... _As>
+- static inline void _S_masked_cassign(const _MaskMember __bits,
+- _SimdTuple<_Tp, _As...>& __lhs,
+- const _SimdTuple<_Tp, _As...>& __rhs,
+- _Op __op)
++ static constexpr inline void
++ _S_masked_cassign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
++ const _SimdTuple<_Tp, _As...>& __rhs, _Op __op)
+ {
+- __for_each(
+- __lhs, __rhs,
+- [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
+- __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
+- __native_lhs, __native_rhs, __op);
+- });
++ __for_each(__lhs, __rhs,
++ [&](auto __meta, auto& __native_lhs, auto __native_rhs)
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ {
++ __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
++ __native_lhs, __native_rhs, __op);
++ });
+ }
+
+ // Optimization for the case where the RHS is a scalar. No need to broadcast
+ // the scalar to a simd first.
+ template <typename _Op, typename _Tp, typename... _As>
+- static inline void _S_masked_cassign(const _MaskMember __bits,
+- _SimdTuple<_Tp, _As...>& __lhs,
+- const _Tp& __rhs, _Op __op)
++ static constexpr inline void
++ _S_masked_cassign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
++ const _Tp& __rhs, _Op __op)
+ {
+ __for_each(
+- __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
++ __lhs, [&](auto __meta, auto& __native_lhs) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
+ __native_lhs, __rhs, __op);
+ });
+@@ -1806,7 +1781,7 @@ template <int _Np, typename>
+
+ // _S_masked_unary {{{2
+ template <template <typename> class _Op, typename _Tp, typename... _As>
+- static inline _SimdTuple<_Tp, _As...>
++ static constexpr inline _SimdTuple<_Tp, _As...>
+ _S_masked_unary(const _MaskMember __bits, const _SimdTuple<_Tp, _As...>& __v)
+ {
+ return __v._M_apply_wrapped([&__bits](auto __meta,
+@@ -1853,6 +1828,13 @@ template <int _Np, typename>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_load(const bool* __mem)
+ {
++ if (__builtin_is_constant_evaluated())
++ {
++ _MaskMember __r{};
++ for (size_t __i = 0; __i < _Np; ++__i)
++ __r.set(__i, __mem[__i]);
++ return __r;
++ }
+ using _Ip = __int_for_sizeof_t<bool>;
+ // the following load uses element_aligned and relies on __mem already
+ // carrying alignment information from when this load function was
+@@ -1888,18 +1870,19 @@ template <int _Np, typename>
+ // }}}
+ // _S_from_bitmask {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) noexcept
+ { return __bits; }
+
+ // _S_load {{{2
+- static inline _MaskMember _S_load(const bool* __mem) noexcept
++ static constexpr inline _MaskMember
++ _S_load(const bool* __mem) noexcept
+ {
+ // TODO: _UChar is not necessarily the best type to use here. For smaller
+ // _Np _UShort, _UInt, _ULLong, float, and double can be more efficient.
+ _ULLong __r = 0;
+ using _Vs = __fixed_size_storage_t<_UChar, _Np>;
+- __for_each(_Vs{}, [&](auto __meta, auto) {
++ __for_each(_Vs{}, [&](auto __meta, auto) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __r |= __meta._S_mask_to_shifted_ullong(
+ __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
+ _SizeConstant<__meta._S_size()>()));
+@@ -1908,19 +1891,19 @@ template <int _Np, typename>
+ }
+
+ // _S_masked_load {{{2
+- static inline _MaskMember _S_masked_load(_MaskMember __merge,
+- _MaskMember __mask,
+- const bool* __mem) noexcept
++ static constexpr inline _MaskMember
++ _S_masked_load(_MaskMember __merge, _MaskMember __mask, const bool* __mem) noexcept
+ {
+- _BitOps::_S_bit_iteration(__mask.to_ullong(), [&](auto __i) {
+- __merge.set(__i, __mem[__i]);
+- });
++ _BitOps::_S_bit_iteration(__mask.to_ullong(),
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __merge.set(__i, __mem[__i]);
++ });
+ return __merge;
+ }
+
+ // _S_store {{{2
+- static inline void _S_store(const _MaskMember __bitmask,
+- bool* __mem) noexcept
++ static constexpr inline void
++ _S_store(const _MaskMember __bitmask, bool* __mem) noexcept
+ {
+ if constexpr (_Np == 1)
+ __mem[0] = __bitmask[0];
+@@ -1929,18 +1912,19 @@ template <int _Np, typename>
+ }
+
+ // _S_masked_store {{{2
+- static inline void _S_masked_store(const _MaskMember __v, bool* __mem,
+- const _MaskMember __k) noexcept
++ static constexpr inline void
++ _S_masked_store(const _MaskMember __v, bool* __mem, const _MaskMember __k) noexcept
+ {
+- _BitOps::_S_bit_iteration(__k, [&](auto __i) { __mem[__i] = __v[__i]; });
++ _BitOps::_S_bit_iteration(
++ __k, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { __mem[__i] = __v[__i]; });
+ }
+
+ // logical and bitwise operators {{{2
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
+ { return __x & __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
+ { return __x | __y; }
+
+@@ -1948,33 +1932,31 @@ template <int _Np, typename>
+ _S_bit_not(const _MaskMember& __x) noexcept
+ { return ~__x; }
+
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
+ { return __x & __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
+ { return __x | __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC static _MaskMember
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
+ _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
+ { return __x ^ __y; }
+
+ // smart_reference access {{{2
+- _GLIBCXX_SIMD_INTRINSIC static void _S_set(_MaskMember& __k, int __i,
+- bool __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_set(_MaskMember& __k, int __i, bool __x) noexcept
+ { __k.set(__i, __x); }
+
+ // _S_masked_assign {{{2
+- _GLIBCXX_SIMD_INTRINSIC static void
+- _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
+- const _MaskMember __rhs)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs, const _MaskMember __rhs)
+ { __lhs = (__lhs & ~__k) | (__rhs & __k); }
+
+ // Optimization for the case where the RHS is a scalar.
+- _GLIBCXX_SIMD_INTRINSIC static void _S_masked_assign(const _MaskMember __k,
+- _MaskMember& __lhs,
+- const bool __rhs)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs, const bool __rhs)
+ {
+ if (__rhs)
+ __lhs |= __k;
+@@ -1985,25 +1967,28 @@ template <int _Np, typename>
+ // }}}2
+ // _S_all_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_all_of(simd_mask<_Tp, _Abi> __k)
+ { return __data(__k).all(); }
+
+ // }}}
+ // _S_any_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_any_of(simd_mask<_Tp, _Abi> __k)
+ { return __data(__k).any(); }
+
+ // }}}
+ // _S_none_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_none_of(simd_mask<_Tp, _Abi> __k)
+ { return __data(__k).none(); }
+
+ // }}}
+ // _S_some_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (_Np == 1)
+@@ -2015,20 +2000,21 @@ template <int _Np, typename>
+ // }}}
+ // _S_popcount {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
++ _S_popcount(simd_mask<_Tp, _Abi> __k)
+ { return __data(__k).count(); }
+
+ // }}}
+ // _S_find_first_set {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
+ _S_find_first_set(simd_mask<_Tp, _Abi> __k)
+ { return std::__countr_zero(__data(__k).to_ullong()); }
+
+ // }}}
+ // _S_find_last_set {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
+ _S_find_last_set(simd_mask<_Tp, _Abi> __k)
+ { return std::__bit_width(__data(__k).to_ullong()) - 1; }
+
+--- a/src/libstdc++-v3/include/experimental/bits/simd_math.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_math.h
+@@ -788,7 +788,7 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+
+ // __exponent(__x) returns the exponent value (bias removed) as
+ // simd<_Up> with integral _Up
+- auto&& __exponent = [](const _V& __v) {
++ auto&& __exponent = [](const _V& __v) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ using namespace std::experimental::__proposed;
+ using _IV = rebind_simd_t<
+ conditional_t<sizeof(_Tp) == sizeof(_LLong), _LLong, int>, _V>;
+@@ -931,7 +931,7 @@ template <typename _R, typename _ToApply, typename _Tp, typename... _Tps>
+ {
+ return {__private_init,
+ __data(__arg0)._M_apply_per_chunk(
+- [&](auto __impl, const auto&... __inner) {
++ [&](auto __impl, const auto&... __inner) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ using _V = typename decltype(__impl)::simd_type;
+ return __data(__apply(_V(__private_init, __inner)...));
+ },
+@@ -1092,8 +1092,9 @@ _GLIBCXX_SIMD_CVTING2(hypot)
+ if constexpr (__is_fixed_size_abi_v<_Abi> && _V::size() > 1)
+ {
+ return __fixed_size_apply<simd<_Tp, _Abi>>(
+- [](auto __a, auto __b, auto __c) { return hypot(__a, __b, __c); },
+- __x, __y, __z);
++ [](auto __a, auto __b, auto __c) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return hypot(__a, __b, __c);
++ }, __x, __y, __z);
+ }
+ else
+ {
+@@ -1380,9 +1381,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __m,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>([&](auto __i) {
+- return std::assoc_laguerre(__n[__i], __m[__i], __x[__i]);
+- });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::assoc_laguerre(__n[__i], __m[__i], __x[__i]);
++ });
+ }
+
+ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+@@ -1391,9 +1392,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __m,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>([&](auto __i) {
+- return std::assoc_legendre(__n[__i], __m[__i], __x[__i]);
+- });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::assoc_legendre(__n[__i], __m[__i], __x[__i]);
++ });
+ }
+
+ _GLIBCXX_SIMD_MATH_CALL2_(beta, _Tp)
+@@ -1414,8 +1415,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ hermite(const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __n,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>(
+- [&](auto __i) { return std::hermite(__n[__i], __x[__i]); });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::hermite(__n[__i], __x[__i]);
++ });
+ }
+
+ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+@@ -1423,8 +1425,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ laguerre(const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __n,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>(
+- [&](auto __i) { return std::laguerre(__n[__i], __x[__i]); });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::laguerre(__n[__i], __x[__i]);
++ });
+ }
+
+ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+@@ -1432,8 +1435,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ legendre(const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __n,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>(
+- [&](auto __i) { return std::legendre(__n[__i], __x[__i]); });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::legendre(__n[__i], __x[__i]);
++ });
+ }
+
+ _GLIBCXX_SIMD_MATH_CALL_(riemann_zeta)
+@@ -1443,8 +1447,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ sph_bessel(const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __n,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>(
+- [&](auto __i) { return std::sph_bessel(__n[__i], __x[__i]); });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::sph_bessel(__n[__i], __x[__i]);
++ });
+ }
+
+ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+@@ -1453,9 +1458,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __m,
+ const simd<_Tp, _Abi>& theta)
+ {
+- return simd<_Tp, _Abi>([&](auto __i) {
+- return std::assoc_legendre(__l[__i], __m[__i], theta[__i]);
+- });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::assoc_legendre(__l[__i], __m[__i], theta[__i]);
++ });
+ }
+
+ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+@@ -1463,8 +1468,9 @@ template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
+ sph_neumann(const fixed_size_simd<unsigned, simd_size_v<_Tp, _Abi>>& __n,
+ const simd<_Tp, _Abi>& __x)
+ {
+- return simd<_Tp, _Abi>(
+- [&](auto __i) { return std::sph_neumann(__n[__i], __x[__i]); });
++ return simd<_Tp, _Abi>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return std::sph_neumann(__n[__i], __x[__i]);
++ });
+ }
+ // }}}
+
+--- a/src/libstdc++-v3/include/experimental/bits/simd_neon.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_neon.h
+@@ -61,7 +61,7 @@ template <typename _Abi, typename>
+ _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
+ const _Up* __mem) noexcept
+ {
+- __execute_n_times<_Np>([&](auto __i) {
++ __execute_n_times<_Np>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if (__k[__i] != 0)
+ __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
+ });
+@@ -75,7 +75,7 @@ template <typename _Abi, typename>
+ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
+ _MaskMember<_Tp> __k)
+ {
+- __execute_n_times<_Np>([&](auto __i) {
++ __execute_n_times<_Np>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if (__k[__i] != 0)
+ __mem[__i] = __v[__i];
+ });
+@@ -84,57 +84,54 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_reduce {{{
+ template <typename _Tp, typename _BinaryOperation>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
+ _S_reduce(simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
+ {
+- constexpr size_t _Np = __x.size();
+- if constexpr (sizeof(__x) == 16 && _Np >= 4
+- && !_Abi::template _S_is_partial<_Tp>)
+- {
+- const auto __halves = split<simd<_Tp, simd_abi::_Neon<8>>>(__x);
+- const auto __y = __binary_op(__halves[0], __halves[1]);
+- return _SimdImplNeon<simd_abi::_Neon<8>>::_S_reduce(
+- __y, static_cast<_BinaryOperation&&>(__binary_op));
+- }
+- else if constexpr (_Np == 8)
+- {
+- __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<1, 0, 3, 2, 5, 4, 7, 6>(
+- __x._M_data)));
+- __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<3, 2, 1, 0, 7, 6, 5, 4>(
+- __x._M_data)));
+- __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<7, 6, 5, 4, 3, 2, 1, 0>(
+- __x._M_data)));
+- return __x[0];
+- }
+- else if constexpr (_Np == 4)
+- {
+- __x
+- = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<1, 0, 3, 2>(__x._M_data)));
+- __x
+- = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<3, 2, 1, 0>(__x._M_data)));
+- return __x[0];
+- }
+- else if constexpr (_Np == 2)
++ if (not __builtin_is_constant_evaluated())
+ {
+- __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
+- __vector_permute<1, 0>(__x._M_data)));
+- return __x[0];
++ constexpr size_t _Np = __x.size();
++ if constexpr (sizeof(__x) == 16 && _Np >= 4
++ && !_Abi::template _S_is_partial<_Tp>)
++ {
++ const auto __halves = split<simd<_Tp, simd_abi::_Neon<8>>>(__x);
++ const auto __y = __binary_op(__halves[0], __halves[1]);
++ return _SimdImplNeon<simd_abi::_Neon<8>>::_S_reduce(
++ __y, static_cast<_BinaryOperation&&>(__binary_op));
++ }
++ else if constexpr (_Np == 8)
++ {
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<1, 0, 3, 2, 5, 4, 7, 6>(__x._M_data)));
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<3, 2, 1, 0, 7, 6, 5, 4>(__x._M_data)));
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<7, 6, 5, 4, 3, 2, 1, 0>(__x._M_data)));
++ return __x[0];
++ }
++ else if constexpr (_Np == 4)
++ {
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<1, 0, 3, 2>(__x._M_data)));
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<3, 2, 1, 0>(__x._M_data)));
++ return __x[0];
++ }
++ else if constexpr (_Np == 2)
++ {
++ __x = __binary_op(__x, _Base::template _M_make_simd<_Tp, _Np>(
++ __vector_permute<1, 0>(__x._M_data)));
++ return __x[0];
++ }
+ }
+- else
+- return _Base::_S_reduce(__x,
+- static_cast<_BinaryOperation&&>(__binary_op));
++ return _Base::_S_reduce(__x, static_cast<_BinaryOperation&&>(__binary_op));
+ }
+
+ // }}}
+ // math {{{
+ // _S_sqrt {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_sqrt(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_sqrt(_Tp __x)
+ {
+ if constexpr (__have_neon_a64)
+ {
+@@ -157,7 +154,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_trunc {{{
+ template <typename _TW, typename _TVT = _VectorTraits<_TW>>
+- _GLIBCXX_SIMD_INTRINSIC static _TW _S_trunc(_TW __x)
++ _GLIBCXX_SIMD_INTRINSIC static _TW
++ _S_trunc(_TW __x)
+ {
+ using _Tp = typename _TVT::value_type;
+ if constexpr (__have_neon_a32)
+@@ -216,7 +214,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_floor {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_floor(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_floor(_Tp __x)
+ {
+ if constexpr (__have_neon_a32)
+ {
+@@ -239,7 +238,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_ceil {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_ceil(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_ceil(_Tp __x)
+ {
+ if constexpr (__have_neon_a32)
+ {
+@@ -286,7 +286,7 @@ struct _MaskImplNeonMixin
+ {
+ constexpr auto __bitsel
+ = __generate_from_n_evaluations<16, __vector_type_t<_I, 16>>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_I>(
+ __i < _Np ? (__i < 8 ? 1 << __i : 1 << (__i - 8)) : 0);
+ });
+@@ -306,7 +306,7 @@ struct _MaskImplNeonMixin
+ {
+ constexpr auto __bitsel
+ = __generate_from_n_evaluations<8, __vector_type_t<_I, 8>>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_I>(__i < _Np ? 1 << __i : 0);
+ });
+ __asint &= __bitsel;
+@@ -322,7 +322,7 @@ struct _MaskImplNeonMixin
+ {
+ constexpr auto __bitsel
+ = __generate_from_n_evaluations<4, __vector_type_t<_I, 4>>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_I>(__i < _Np ? 1 << __i : 0);
+ });
+ __asint &= __bitsel;
+@@ -346,7 +346,7 @@ struct _MaskImplNeonMixin
+ {
+ constexpr auto __bitsel
+ = __generate_from_n_evaluations<8, __vector_type_t<_I, 8>>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_I>(__i < _Np ? 1 << __i : 0);
+ });
+ __asint &= __bitsel;
+@@ -361,7 +361,7 @@ struct _MaskImplNeonMixin
+ {
+ constexpr auto __bitsel
+ = __generate_from_n_evaluations<4, __vector_type_t<_I, 4>>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_I>(__i < _Np ? 1 << __i : 0);
+ });
+ __asint &= __bitsel;
+@@ -400,7 +400,8 @@ template <typename _Abi, typename>
+
+ // _S_all_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_all_of(simd_mask<_Tp, _Abi> __k)
+ {
+ const auto __kk
+ = __vector_bitcast<char>(__k._M_data)
+@@ -419,7 +420,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_any_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_any_of(simd_mask<_Tp, _Abi> __k)
+ {
+ const auto __kk
+ = __vector_bitcast<char>(__k._M_data)
+@@ -438,7 +440,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_none_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_none_of(simd_mask<_Tp, _Abi> __k)
+ {
+ const auto __kk = _Abi::_S_masked(__k._M_data);
+ if constexpr (sizeof(__k) == 16)
+@@ -472,7 +475,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_popcount {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static int
++ _S_popcount(simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (sizeof(_Tp) == 1)
+ {
+--- a/src/libstdc++-v3/include/experimental/bits/simd_ppc.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_ppc.h
+@@ -64,7 +64,7 @@ template <typename _Abi, typename>
+ __x = _Base::_S_bit_shift_left(__x, __y);
+ if constexpr (sizeof(_Tp) < sizeof(int))
+ {
+- if (__y >= sizeof(_Tp) * __CHAR_BIT__)
++ if (__y >= int(sizeof(_Tp) * __CHAR_BIT__))
+ return {};
+ }
+ return __x;
+@@ -124,12 +124,14 @@ template <typename _Abi, typename>
+
+ // _S_popcount {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static int
++ _S_popcount(simd_mask<_Tp, _Abi> __k)
+ {
+ const auto __kv = __as_vector(__k);
+ if constexpr (__have_power10vec)
+ {
+- return vec_cntm(__to_intrin(__kv), 1);
++ using _Intrin = __intrinsic_type16_t<make_unsigned_t<__int_for_sizeof_t<_Tp>>>;
++ return vec_cntm(reinterpret_cast<_Intrin>(__kv), 1);
+ }
+ else if constexpr (sizeof(_Tp) >= sizeof(int))
+ {
+--- a/src/libstdc++-v3/include/experimental/bits/simd_scalar.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_scalar.h
+@@ -74,7 +74,8 @@ struct simd_abi::_Scalar
+ template <typename _Tp>
+ static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;
+
+- _GLIBCXX_SIMD_INTRINSIC static constexpr bool _S_masked(bool __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_masked(bool __x)
+ { return __x; }
+
+ using _CommonImpl = _CommonImplScalar;
+@@ -110,7 +111,8 @@ struct _CommonImplScalar
+ {
+ // _S_store {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_Tp __x, void* __addr)
++ _GLIBCXX_SIMD_INTRINSIC static void
++ _S_store(_Tp __x, void* __addr)
+ { __builtin_memcpy(__addr, &__x, sizeof(_Tp)); }
+
+ // }}}
+@@ -138,26 +140,26 @@ struct _SimdImplScalar
+
+ // _S_broadcast {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp _S_broadcast(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_broadcast(_Tp __x) noexcept
+ { return __x; }
+
+ // _S_generator {{{2
+ template <typename _Fp, typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp _S_generator(_Fp&& __gen,
+- _TypeTag<_Tp>)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
+ { return __gen(_SizeConstant<0>()); }
+
+ // _S_load {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_load(const _Up* __mem,
+- _TypeTag<_Tp>) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
+ { return static_cast<_Tp>(__mem[0]); }
+
+ // _S_masked_load {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC
+- static _Tp _S_masked_load(_Tp __merge, bool __k,
+- const _Up* __mem) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_masked_load(_Tp __merge, bool __k, const _Up* __mem) noexcept
+ {
+ if (__k)
+ __merge = static_cast<_Tp>(__mem[0]);
+@@ -166,97 +168,95 @@ struct _SimdImplScalar
+
+ // _S_store {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC
+- static void _S_store(_Tp __v, _Up* __mem, _TypeTag<_Tp>) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(_Tp __v, _Up* __mem, _TypeTag<_Tp>) noexcept
+ { __mem[0] = static_cast<_Up>(__v); }
+
+ // _S_masked_store {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC
+- static void _S_masked_store(const _Tp __v, _Up* __mem,
+- const bool __k) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_masked_store(const _Tp __v, _Up* __mem, const bool __k) noexcept
+ { if (__k) __mem[0] = __v; }
+
+ // _S_negate {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_negate(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_negate(_Tp __x) noexcept
+ { return !__x; }
+
+ // _S_reduce {{{2
+ template <typename _Tp, typename _BinaryOperation>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
+ _S_reduce(const simd<_Tp, simd_abi::scalar>& __x, const _BinaryOperation&)
+ { return __x._M_data; }
+
+ // _S_min, _S_max {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_min(const _Tp __a, const _Tp __b)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_min(const _Tp __a, const _Tp __b)
+ { return std::min(__a, __b); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_max(const _Tp __a, const _Tp __b)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_max(const _Tp __a, const _Tp __b)
+ { return std::max(__a, __b); }
+
+ // _S_complement {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_complement(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_complement(_Tp __x) noexcept
+ { return static_cast<_Tp>(~__x); }
+
+ // _S_unary_minus {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_unary_minus(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_unary_minus(_Tp __x) noexcept
+ { return static_cast<_Tp>(-__x); }
+
+ // arithmetic operators {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_plus(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_plus(_Tp __x, _Tp __y)
+ {
+ return static_cast<_Tp>(__promote_preserving_unsigned(__x)
+ + __promote_preserving_unsigned(__y));
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_minus(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_minus(_Tp __x, _Tp __y)
+ {
+ return static_cast<_Tp>(__promote_preserving_unsigned(__x)
+ - __promote_preserving_unsigned(__y));
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_multiplies(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_multiplies(_Tp __x, _Tp __y)
+ {
+ return static_cast<_Tp>(__promote_preserving_unsigned(__x)
+ * __promote_preserving_unsigned(__y));
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_divides(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_divides(_Tp __x, _Tp __y)
+ {
+ return static_cast<_Tp>(__promote_preserving_unsigned(__x)
+ / __promote_preserving_unsigned(__y));
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_modulus(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_modulus(_Tp __x, _Tp __y)
+ {
+ return static_cast<_Tp>(__promote_preserving_unsigned(__x)
+ % __promote_preserving_unsigned(__y));
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_bit_and(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_bit_and(_Tp __x, _Tp __y)
+ {
+ if constexpr (is_floating_point_v<_Tp>)
+ {
+@@ -269,8 +269,8 @@ struct _SimdImplScalar
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_bit_or(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_bit_or(_Tp __x, _Tp __y)
+ {
+ if constexpr (is_floating_point_v<_Tp>)
+ {
+@@ -283,8 +283,8 @@ struct _SimdImplScalar
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_bit_xor(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_bit_xor(_Tp __x, _Tp __y)
+ {
+ if constexpr (is_floating_point_v<_Tp>)
+ {
+@@ -297,13 +297,13 @@ struct _SimdImplScalar
+ }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_bit_shift_left(_Tp __x, int __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_bit_shift_left(_Tp __x, int __y)
+ { return static_cast<_Tp>(__promote_preserving_unsigned(__x) << __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr _Tp _S_bit_shift_right(_Tp __x, int __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_bit_shift_right(_Tp __x, int __y)
+ { return static_cast<_Tp>(__promote_preserving_unsigned(__x) >> __y); }
+
+ // math {{{2
+@@ -312,300 +312,362 @@ struct _SimdImplScalar
+ using _ST = _SimdTuple<_Tp, simd_abi::scalar>;
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_acos(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_acos(_Tp __x)
+ { return std::acos(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_asin(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_asin(_Tp __x)
+ { return std::asin(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_atan(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_atan(_Tp __x)
+ { return std::atan(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_cos(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_cos(_Tp __x)
+ { return std::cos(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_sin(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_sin(_Tp __x)
+ { return std::sin(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_tan(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_tan(_Tp __x)
+ { return std::tan(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_acosh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_acosh(_Tp __x)
+ { return std::acosh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_asinh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_asinh(_Tp __x)
+ { return std::asinh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_atanh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_atanh(_Tp __x)
+ { return std::atanh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_cosh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_cosh(_Tp __x)
+ { return std::cosh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_sinh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_sinh(_Tp __x)
+ { return std::sinh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_tanh(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_tanh(_Tp __x)
+ { return std::tanh(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_atan2(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_atan2(_Tp __x, _Tp __y)
+ { return std::atan2(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_exp(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_exp(_Tp __x)
+ { return std::exp(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_exp2(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_exp2(_Tp __x)
+ { return std::exp2(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_expm1(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_expm1(_Tp __x)
+ { return std::expm1(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_log(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_log(_Tp __x)
+ { return std::log(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_log10(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_log10(_Tp __x)
+ { return std::log10(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_log1p(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_log1p(_Tp __x)
+ { return std::log1p(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_log2(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_log2(_Tp __x)
+ { return std::log2(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_logb(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_logb(_Tp __x)
+ { return std::logb(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _ST<int> _S_ilogb(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _ST<int>
++ _S_ilogb(_Tp __x)
+ { return {std::ilogb(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_pow(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_pow(_Tp __x, _Tp __y)
+ { return std::pow(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_abs(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_abs(_Tp __x)
+ { return std::abs(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fabs(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fabs(_Tp __x)
+ { return std::fabs(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_sqrt(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_sqrt(_Tp __x)
+ { return std::sqrt(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_cbrt(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_cbrt(_Tp __x)
+ { return std::cbrt(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_erf(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_erf(_Tp __x)
+ { return std::erf(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_erfc(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_erfc(_Tp __x)
+ { return std::erfc(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_lgamma(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_lgamma(_Tp __x)
+ { return std::lgamma(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_tgamma(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_tgamma(_Tp __x)
+ { return std::tgamma(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_trunc(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_trunc(_Tp __x)
+ { return std::trunc(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_floor(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_floor(_Tp __x)
+ { return std::floor(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_ceil(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_ceil(_Tp __x)
+ { return std::ceil(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_nearbyint(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_nearbyint(_Tp __x)
+ { return std::nearbyint(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_rint(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_rint(_Tp __x)
+ { return std::rint(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _ST<long> _S_lrint(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _ST<long>
++ _S_lrint(_Tp __x)
+ { return {std::lrint(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _ST<long long> _S_llrint(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _ST<long long>
++ _S_llrint(_Tp __x)
+ { return {std::llrint(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_round(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_round(_Tp __x)
+ { return std::round(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _ST<long> _S_lround(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _ST<long>
++ _S_lround(_Tp __x)
+ { return {std::lround(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _ST<long long> _S_llround(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static _ST<long long>
++ _S_llround(_Tp __x)
+ { return {std::llround(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_ldexp(_Tp __x, _ST<int> __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_ldexp(_Tp __x, _ST<int> __y)
+ { return std::ldexp(__x, __y.first); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_scalbn(_Tp __x, _ST<int> __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_scalbn(_Tp __x, _ST<int> __y)
+ { return std::scalbn(__x, __y.first); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_scalbln(_Tp __x, _ST<long> __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_scalbln(_Tp __x, _ST<long> __y)
+ { return std::scalbln(__x, __y.first); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fmod(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fmod(_Tp __x, _Tp __y)
+ { return std::fmod(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_remainder(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_remainder(_Tp __x, _Tp __y)
+ { return std::remainder(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_nextafter(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_nextafter(_Tp __x, _Tp __y)
+ { return std::nextafter(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fdim(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fdim(_Tp __x, _Tp __y)
+ { return std::fdim(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fmax(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fmax(_Tp __x, _Tp __y)
+ { return std::fmax(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fmin(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fmin(_Tp __x, _Tp __y)
+ { return std::fmin(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_fma(_Tp __x, _Tp __y, _Tp __z)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_fma(_Tp __x, _Tp __y, _Tp __z)
+ { return std::fma(__x, __y, __z); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_remquo(_Tp __x, _Tp __y, _ST<int>* __z)
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_remquo(_Tp __x, _Tp __y, _ST<int>* __z)
+ { return std::remquo(__x, __y, &__z->first); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static _ST<int> _S_fpclassify(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _ST<int>
++ _S_fpclassify(_Tp __x)
+ { return {std::fpclassify(__x)}; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isfinite(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isfinite(_Tp __x)
+ { return std::isfinite(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isinf(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isinf(_Tp __x)
+ { return std::isinf(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isnan(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isnan(_Tp __x)
+ { return std::isnan(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isnormal(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isnormal(_Tp __x)
+ { return std::isnormal(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_signbit(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_signbit(_Tp __x)
+ { return std::signbit(__x); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isgreater(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isgreater(_Tp __x, _Tp __y)
+ { return std::isgreater(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isgreaterequal(_Tp __x,
+- _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isgreaterequal(_Tp __x, _Tp __y)
+ { return std::isgreaterequal(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isless(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isless(_Tp __x, _Tp __y)
+ { return std::isless(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_islessequal(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_islessequal(_Tp __x, _Tp __y)
+ { return std::islessequal(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_islessgreater(_Tp __x,
+- _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_islessgreater(_Tp __x, _Tp __y)
+ { return std::islessgreater(__x, __y); }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_isunordered(_Tp __x,
+- _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_isunordered(_Tp __x, _Tp __y)
+ { return std::isunordered(__x, __y); }
+
+ // _S_increment & _S_decrement{{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr static void _S_increment(_Tp& __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_increment(_Tp& __x)
+ { ++__x; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr static void _S_decrement(_Tp& __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_decrement(_Tp& __x)
+ { --__x; }
+
+
+ // compares {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_equal_to(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_equal_to(_Tp __x, _Tp __y)
+ { return __x == __y; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_not_equal_to(_Tp __x,
+- _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_not_equal_to(_Tp __x, _Tp __y)
+ { return __x != __y; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_less(_Tp __x, _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_less(_Tp __x, _Tp __y)
+ { return __x < __y; }
+
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool _S_less_equal(_Tp __x,
+- _Tp __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_less_equal(_Tp __x, _Tp __y)
+ { return __x <= __y; }
+
+ // smart_reference access {{{2
+ template <typename _Tp, typename _Up>
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr static void _S_set(_Tp& __v, [[maybe_unused]] int __i,
+- _Up&& __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_set(_Tp& __v, [[maybe_unused]] int __i, _Up&& __x) noexcept
+ {
+ _GLIBCXX_DEBUG_ASSERT(__i == 0);
+ __v = static_cast<_Up&&>(__x);
+@@ -613,20 +675,20 @@ struct _SimdImplScalar
+
+ // _S_masked_assign {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_assign(bool __k, _Tp& __lhs, _Tp __rhs)
+ { if (__k) __lhs = __rhs; }
+
+ // _S_masked_cassign {{{2
+ template <typename _Op, typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_cassign(const bool __k, _Tp& __lhs, const _Tp __rhs, _Op __op)
+ { if (__k) __lhs = __op(_SimdImplScalar{}, __lhs, __rhs); }
+
+ // _S_masked_unary {{{2
+ template <template <typename> class _Op, typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static _Tp _S_masked_unary(const bool __k,
+- const _Tp __v)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
++ _S_masked_unary(const bool __k, const _Tp __v)
+ { return static_cast<_Tp>(__k ? _Op<_Tp>{}(__v) : __v); }
+
+ // }}}2
+@@ -643,13 +705,15 @@ struct _MaskImplScalar
+ // }}}
+ // _S_broadcast {{{
+ template <typename>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr bool _S_broadcast(bool __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_broadcast(bool __x)
+ { return __x; }
+
+ // }}}
+ // _S_load {{{
+ template <typename>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr bool _S_load(const bool* __mem)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_load(const bool* __mem)
+ { return __mem[0]; }
+
+ // }}}
+@@ -673,12 +737,12 @@ struct _MaskImplScalar
+ // }}}
+ // _S_from_bitmask {{{2
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_from_bitmask(_SanitizedBitMask<1> __bits, _TypeTag<_Tp>) noexcept
+ { return __bits[0]; }
+
+ // _S_masked_load {{{2
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_masked_load(bool __merge, bool __mask, const bool* __mem) noexcept
+ {
+ if (__mask)
+@@ -687,11 +751,12 @@ struct _MaskImplScalar
+ }
+
+ // _S_store {{{2
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(bool __v, bool* __mem) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(bool __v, bool* __mem) noexcept
+ { __mem[0] = __v; }
+
+ // _S_masked_store {{{2
+- _GLIBCXX_SIMD_INTRINSIC static void
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_masked_store(const bool __v, bool* __mem, const bool __k) noexcept
+ {
+ if (__k)
+@@ -699,42 +764,41 @@ struct _MaskImplScalar
+ }
+
+ // logical and bitwise operators {{{2
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_logical_and(bool __x, bool __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_logical_and(bool __x, bool __y)
+ { return __x && __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_logical_or(bool __x, bool __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_logical_or(bool __x, bool __y)
+ { return __x || __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_bit_not(bool __x)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_bit_not(bool __x)
+ { return !__x; }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_bit_and(bool __x, bool __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_bit_and(bool __x, bool __y)
+ { return __x && __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_bit_or(bool __x, bool __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_bit_or(bool __x, bool __y)
+ { return __x || __y; }
+
+- _GLIBCXX_SIMD_INTRINSIC
+- static constexpr bool _S_bit_xor(bool __x, bool __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
++ _S_bit_xor(bool __x, bool __y)
+ { return __x != __y; }
+
+ // smart_reference access {{{2
+- _GLIBCXX_SIMD_INTRINSIC
+- constexpr static void _S_set(bool& __k, [[maybe_unused]] int __i,
+- bool __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_set(bool& __k, [[maybe_unused]] int __i, bool __x) noexcept
+ {
+ _GLIBCXX_DEBUG_ASSERT(__i == 0);
+ __k = __x;
+ }
+
+ // _S_masked_assign {{{2
+- _GLIBCXX_SIMD_INTRINSIC static void _S_masked_assign(bool __k, bool& __lhs,
+- bool __rhs)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_masked_assign(bool __k, bool& __lhs, bool __rhs)
+ {
+ if (__k)
+ __lhs = __rhs;
+@@ -743,49 +807,49 @@ struct _MaskImplScalar
+ // }}}2
+ // _S_all_of {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_all_of(simd_mask<_Tp, _Abi> __k)
+ { return __k._M_data; }
+
+ // }}}
+ // _S_any_of {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_any_of(simd_mask<_Tp, _Abi> __k)
+ { return __k._M_data; }
+
+ // }}}
+ // _S_none_of {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_none_of(simd_mask<_Tp, _Abi> __k)
+ { return !__k._M_data; }
+
+ // }}}
+ // _S_some_of {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static bool
++ _GLIBCXX_SIMD_INTRINSIC static constexpr bool
+ _S_some_of(simd_mask<_Tp, _Abi>)
+ { return false; }
+
+ // }}}
+ // _S_popcount {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static int
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
+ _S_popcount(simd_mask<_Tp, _Abi> __k)
+ { return __k._M_data; }
+
+ // }}}
+ // _S_find_first_set {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static int
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
+ _S_find_first_set(simd_mask<_Tp, _Abi>)
+ { return 0; }
+
+ // }}}
+ // _S_find_last_set {{{
+ template <typename _Tp, typename _Abi>
+- _GLIBCXX_SIMD_INTRINSIC constexpr static int
++ _GLIBCXX_SIMD_INTRINSIC static constexpr int
+ _S_find_last_set(simd_mask<_Tp, _Abi>)
+ { return 0; }
+
+--- a/src/libstdc++-v3/include/experimental/bits/simd_x86.h
++++ b/src/libstdc++-v3/include/experimental/bits/simd_x86.h
+@@ -40,10 +40,7 @@ _GLIBCXX_SIMD_BEGIN_NAMESPACE
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<__int_for_sizeof_t<_Tp>, _Np>
+ __to_masktype(_SimdWrapper<_Tp, _Np> __x)
+- {
+- return reinterpret_cast<__vector_type_t<__int_for_sizeof_t<_Tp>, _Np>>(
+- __x._M_data);
+- }
++ { return reinterpret_cast<__vector_type_t<__int_for_sizeof_t<_Tp>, _Np>>(__x._M_data); }
+
+ template <typename _TV,
+ typename _TVT
+@@ -366,6 +363,53 @@ template <typename _Tp>
+
+ // }}}
+
++#ifdef __clang__
++template <size_t _Np, typename _Tp, typename _Kp>
++ _GLIBCXX_SIMD_INTRINSIC constexpr auto
++ __movm(_Kp __k) noexcept
++ {
++ static_assert(is_unsigned_v<_Kp>);
++ if constexpr (sizeof(_Tp) == 1 && __have_avx512bw)
++ {
++ if constexpr (_Np <= 16 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2b128(__k);
++ else if constexpr (_Np <= 32 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2b256(__k);
++ else
++ return __builtin_ia32_cvtmask2b512(__k);
++ }
++ else if constexpr (sizeof(_Tp) == 2 && __have_avx512bw)
++ {
++ if constexpr (_Np <= 8 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2w128(__k);
++ else if constexpr (_Np <= 16 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2w256(__k);
++ else
++ return __builtin_ia32_cvtmask2w512(__k);
++ }
++ else if constexpr (sizeof(_Tp) == 4 && __have_avx512dq)
++ {
++ if constexpr (_Np <= 4 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2d128(__k);
++ else if constexpr (_Np <= 8 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2d256(__k);
++ else
++ return __builtin_ia32_cvtmask2d512(__k);
++ }
++ else if constexpr (sizeof(_Tp) == 8 && __have_avx512dq)
++ {
++ if constexpr (_Np <= 2 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2q128(__k);
++ else if constexpr (_Np <= 4 && __have_avx512vl)
++ return __builtin_ia32_cvtmask2q256(__k);
++ else
++ return __builtin_ia32_cvtmask2q512(__k);
++ }
++ else
++ __assert_unreachable<_Tp>();
++ }
++#endif // __clang__
++
+ #ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
+ #include "simd_x86_conversions.h"
+ #endif
+@@ -434,7 +478,8 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ #ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
+ // _S_converts_via_decomposition {{{
+ template <typename _From, typename _To, size_t _ToSize>
+- static constexpr bool _S_converts_via_decomposition()
++ static constexpr bool
++ _S_converts_via_decomposition()
+ {
+ if constexpr (is_integral_v<
+ _From> && is_integral_v<_To> && sizeof(_From) == 8
+@@ -465,12 +510,14 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ using _CommonImplBuiltin::_S_store;
+
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __x,
+- void* __addr)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(_SimdWrapper<_Tp, _Np> __x, void* __addr)
+ {
+ constexpr size_t _Bytes = _Np * sizeof(_Tp);
+
+- if constexpr ((_Bytes & (_Bytes - 1)) != 0 && __have_avx512bw_vl)
++ if (__builtin_is_constant_evaluated())
++ _CommonImplBuiltin::_S_store(__x, __addr);
++ else if constexpr ((_Bytes & (_Bytes - 1)) != 0 && __have_avx512bw_vl)
+ {
+ const auto __v = __to_intrin(__x);
+
+@@ -536,17 +583,20 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ _GLIBCXX_SIMD_INTRINSIC static constexpr void
+ _S_store_bool_array(const _BitMask<_Np, _Sanitized> __x, bool* __mem)
+ {
+- if constexpr (__have_avx512bw_vl) // don't care for BW w/o VL
+- _S_store<_Np>(1 & __vector_bitcast<_UChar, _Np>([=]() constexpr {
+- if constexpr (_Np <= 16)
+- return _mm_movm_epi8(__x._M_to_bits());
+- else if constexpr (_Np <= 32)
+- return _mm256_movm_epi8(__x._M_to_bits());
+- else if constexpr (_Np <= 64)
+- return _mm512_movm_epi8(__x._M_to_bits());
+- else
+- __assert_unreachable<_SizeConstant<_Np>>();
+- }()),
++ if (__builtin_is_constant_evaluated())
++ _CommonImplBuiltin::_S_store_bool_array(__x, __mem);
++ else if constexpr (__have_avx512bw_vl) // don't care for BW w/o VL
++ _S_store<_Np>(1 & __vector_bitcast<_UChar, _Np>(
++ [=]() constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (_Np <= 16)
++ return _mm_movm_epi8(__x._M_to_bits());
++ else if constexpr (_Np <= 32)
++ return _mm256_movm_epi8(__x._M_to_bits());
++ else if constexpr (_Np <= 64)
++ return _mm512_movm_epi8(__x._M_to_bits());
++ else
++ __assert_unreachable<_SizeConstant<_Np>>();
++ }()),
+ __mem);
+ else if constexpr (__have_bmi2)
+ {
+@@ -554,7 +604,7 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ _S_store<_Np>(_pdep_u32(__x._M_to_bits(), 0x01010101U), __mem);
+ else
+ __execute_n_times<__div_roundup(_Np, sizeof(size_t))>(
+- [&](auto __i) {
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ constexpr size_t __offset = __i * sizeof(size_t);
+ constexpr int __todo = std::min(sizeof(size_t), _Np - __offset);
+ if constexpr (__todo == 1)
+@@ -575,7 +625,7 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ });
+ }
+ else if constexpr (__have_sse2 && _Np > 7)
+- __execute_n_times<__div_roundup(_Np, 16)>([&](auto __i) {
++ __execute_n_times<__div_roundup(_Np, 16)>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ constexpr int __offset = __i * 16;
+ constexpr int __todo = std::min(16, int(_Np) - __offset);
+ const int __bits = __x.template _M_extract<__offset>()._M_to_bits();
+@@ -620,14 +670,13 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ _GLIBCXX_SIMD_INTRINSIC static _TV
+ _S_blend_avx512(const _Kp __k, const _TV __a, const _TV __b) noexcept
+ {
+-#ifdef __clang__
+- // FIXME: this does a boolean choice, not a blend
+- return __k ? __a : __b;
+-#else
+ static_assert(__is_vector_type_v<_TV>);
+ using _Tp = typename _VectorTraits<_TV>::value_type;
+ static_assert(sizeof(_TV) >= 16);
+ static_assert(sizeof(_Tp) <= 8);
++#ifdef __clang__
++ return __movm<_VectorTraits<_TV>::_S_full_size, _Tp>(__k) ? __b : __a;
++#else
+ using _IntT
+ = conditional_t<(sizeof(_Tp) > 2),
+ conditional_t<sizeof(_Tp) == 4, int, long long>,
+@@ -701,8 +750,8 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ // Requires: _Tp to be an intrinsic type (integers blend per byte) and 16/32
+ // Bytes wide
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_blend_intrin(_Tp __k, _Tp __a,
+- _Tp __b) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_blend_intrin(_Tp __k, _Tp __a, _Tp __b) noexcept
+ {
+ static_assert(is_same_v<decltype(__to_intrin(__a)), _Tp>);
+ constexpr struct
+@@ -765,9 +814,10 @@ struct _CommonImplX86 : _CommonImplBuiltin
+ static_assert(is_same_v<_Tp, _Tp> && __have_avx512f);
+ if (__k._M_is_constprop() && __at0._M_is_constprop()
+ && __at1._M_is_constprop())
+- return __generate_from_n_evaluations<_Np,
+- __vector_type_t<_Tp, _Np>>([&](
+- auto __i) constexpr { return __k[__i] ? __at1[__i] : __at0[__i]; });
++ return __generate_from_n_evaluations<_Np, __vector_type_t<_Tp, _Np>>(
++ [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __k[__i] ? __at1[__i] : __at0[__i];
++ });
+ else if constexpr (sizeof(__at0) == 64
+ || (__have_avx512vl && sizeof(__at0) >= 16))
+ return _S_blend_avx512(__k._M_data, __at0._M_data, __at1._M_data);
+@@ -841,6 +891,7 @@ template <typename _Abi, typename>
+ = (sizeof(_Tp) >= 4 && __have_avx512f) || __have_avx512bw ? 64
+ : (is_floating_point_v<_Tp>&& __have_avx) || __have_avx2 ? 32
+ : 16;
++
+ using _MaskImpl = typename _Abi::_MaskImpl;
+
+ // _S_masked_load {{{
+@@ -994,9 +1045,8 @@ template <typename _Abi, typename>
+ }
+ else
+ _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
+- [&](auto __i) {
+- __merge._M_set(__i, static_cast<_Tp>(
+- __mem[__i]));
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
+ });
+ }
+ /* Very uncertain, that the following improves anything. Needs
+@@ -1032,8 +1082,7 @@ template <typename _Abi, typename>
+ // _S_masked_store_nocvt {{{
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static void
+- _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
+- _SimdWrapper<bool, _Np> __k)
++ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _SimdWrapper<bool, _Np> __k)
+ {
+ [[maybe_unused]] const auto __vi = __to_intrin(__v);
+ if constexpr (sizeof(__vi) == 64)
+@@ -1300,7 +1349,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_multiplies {{{
+ template <typename _V, typename _VVT = _VectorTraits<_V>>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr _V _S_multiplies(_V __x, _V __y)
++ _GLIBCXX_SIMD_INTRINSIC static constexpr _V
++ _S_multiplies(_V __x, _V __y)
+ {
+ using _Tp = typename _VVT::value_type;
+ if (__builtin_is_constant_evaluated() || __x._M_is_constprop()
+@@ -1417,11 +1467,12 @@ template <typename _Abi, typename>
+ const auto __yf = __convert_all<_FloatV, __n_floatv>(
+ _Abi::__make_padding_nonzero(__as_vector(__y)));
+ return __call_with_n_evaluations<__n_floatv>(
+- [](auto... __quotients) {
++ [](auto... __quotients) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return __vector_convert<_R>(__quotients...);
+ },
+- [&__xf,
+- &__yf](auto __i) -> _SimdWrapper<_Float, __n_intermediate> {
++ [&__xf, &__yf](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
++ -> _SimdWrapper<_Float, __n_intermediate>
++ {
+ #if __RECIPROCAL_MATH__
+ // If -freciprocal-math is active, using the `/` operator is
+ // incorrect because it may be translated to an imprecise
+@@ -1477,6 +1528,8 @@ template <typename _Abi, typename>
+ */
+ return _Base::_S_divides(__x, __y);
+ }
++#else
++ using _Base::_S_divides;
+ #endif // _GLIBCXX_SIMD_WORKAROUND_PR90993
+
+ // }}}
+@@ -1980,7 +2033,7 @@ template <typename _Abi, typename>
+ {
+ auto __mask = __vector_bitcast<_UChar>(
+ __vector_bitcast<_UShort>(__iy) << 5);
+- auto __maskl = [&]() {
++ auto __maskl = [&]() _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return __to_intrin(__vector_bitcast<_UShort>(__mask) << 8);
+ };
+ auto __xh = __vector_bitcast<short>(__ix);
+@@ -2067,19 +2120,20 @@ template <typename _Abi, typename>
+ } //}}}
+ else if constexpr (sizeof(_Up) == 2 && sizeof(__x) >= 4) //{{{
+ {
+- [[maybe_unused]] auto __blend_0xaa = [](auto __a, auto __b) {
+- if constexpr (sizeof(__a) == 16)
+- return _mm_blend_epi16(__to_intrin(__a), __to_intrin(__b),
+- 0xaa);
+- else if constexpr (sizeof(__a) == 32)
+- return _mm256_blend_epi16(__to_intrin(__a), __to_intrin(__b),
+- 0xaa);
+- else if constexpr (sizeof(__a) == 64)
+- return _mm512_mask_blend_epi16(0xaaaa'aaaaU, __to_intrin(__a),
+- __to_intrin(__b));
+- else
+- __assert_unreachable<decltype(__a)>();
+- };
++ [[maybe_unused]] auto __blend_0xaa
++ = [](auto __a, auto __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ if constexpr (sizeof(__a) == 16)
++ return _mm_blend_epi16(__to_intrin(__a), __to_intrin(__b),
++ 0xaa);
++ else if constexpr (sizeof(__a) == 32)
++ return _mm256_blend_epi16(__to_intrin(__a), __to_intrin(__b),
++ 0xaa);
++ else if constexpr (sizeof(__a) == 64)
++ return _mm512_mask_blend_epi16(0xaaaa'aaaaU, __to_intrin(__a),
++ __to_intrin(__b));
++ else
++ __assert_unreachable<decltype(__a)>();
++ };
+ if constexpr (__have_avx512bw_vl && sizeof(_Tp) <= 16)
+ return __intrin_bitcast<_V>(is_signed_v<_Up>
+ ? _mm_srav_epi16(__ix, __iy)
+@@ -2136,9 +2190,10 @@ template <typename _Abi, typename>
+ {
+ auto __k = __vector_bitcast<_UShort>(__iy) << 11;
+ auto __x128 = __vector_bitcast<_Up>(__ix);
+- auto __mask = [](__vector_type16_t<_UShort> __kk) {
+- return __vector_bitcast<short>(__kk) < 0;
+- };
++ auto __mask
++ = [](__vector_type16_t<_UShort> __kk) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return __vector_bitcast<short>(__kk) < 0;
++ };
+ // do __x128 = 0 where __y[4] is set
+ __x128 = __mask(__k) ? decltype(__x128)() : __x128;
+ // do __x128 =>> 8 where __y[3] is set
+@@ -2178,7 +2233,7 @@ template <typename _Abi, typename>
+ }
+ else
+ {
+- auto __shift = [](auto __a, auto __b) {
++ auto __shift = [](auto __a, auto __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr (is_signed_v<_Up>)
+ return _mm_sra_epi32(__a, __b);
+ else
+@@ -2268,14 +2323,14 @@ template <typename _Abi, typename>
+ } // }}}
+ else if (__builtin_is_constant_evaluated())
+ return _Base::_S_equal_to(__x, __y);
+- else if constexpr (sizeof(__x) == 8) // {{{
++ else if constexpr (sizeof(__x) == 8)
+ {
+ const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
+ == __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
+- _MaskMember<_Tp> __r64;
++ _MaskMember<_Tp> __r64{};
+ __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
+ return __r64;
+- } // }}}
++ }
+ else
+ return _Base::_S_equal_to(__x, __y);
+ }
+@@ -2346,7 +2401,7 @@ template <typename _Abi, typename>
+ {
+ const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
+ != __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
+- _MaskMember<_Tp> __r64;
++ _MaskMember<_Tp> __r64{};
+ __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
+ return __r64;
+ }
+@@ -2454,7 +2509,7 @@ template <typename _Abi, typename>
+ {
+ const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
+ < __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
+- _MaskMember<_Tp> __r64;
++ _MaskMember<_Tp> __r64{};
+ __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
+ return __r64;
+ }
+@@ -2562,7 +2617,7 @@ template <typename _Abi, typename>
+ {
+ const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
+ <= __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
+- _MaskMember<_Tp> __r64;
++ _MaskMember<_Tp> __r64{};
+ __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
+ return __r64;
+ }
+@@ -2735,7 +2790,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_nearbyint {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_nearbyint(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_nearbyint(_Tp __x) noexcept
+ {
+ if constexpr (_TVT::template _S_is<float, 16>)
+ return _mm512_roundscale_ps(__x, 0x0c);
+@@ -2760,7 +2816,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_rint {{{
+ template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
+- _GLIBCXX_SIMD_INTRINSIC static _Tp _S_rint(_Tp __x) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static _Tp
++ _S_rint(_Tp __x) noexcept
+ {
+ if constexpr (_TVT::template _S_is<float, 16>)
+ return _mm512_roundscale_ps(__x, 0x04);
+@@ -2908,7 +2965,8 @@ template <typename _Abi, typename>
+ // _S_isnonzerovalue_mask {{{
+ // (isnormal | is subnormal == !isinf & !isnan & !is zero)
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static auto _S_isnonzerovalue_mask(_Tp __x)
++ _GLIBCXX_SIMD_INTRINSIC static auto
++ _S_isnonzerovalue_mask(_Tp __x)
+ {
+ using _Traits = _VectorTraits<_Tp>;
+ if constexpr (__have_avx512dq_vl)
+@@ -3175,8 +3233,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_isgreater {{{
+ template <typename _Tp, size_t _Np>
+- static constexpr _MaskMember<_Tp> _S_isgreater(_SimdWrapper<_Tp, _Np> __x,
+- _SimdWrapper<_Tp, _Np> __y)
++ static constexpr _MaskMember<_Tp>
++ _S_isgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
+ {
+ const auto __xi = __to_intrin(__x);
+ const auto __yi = __to_intrin(__y);
+@@ -3293,8 +3351,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_isless {{{
+ template <typename _Tp, size_t _Np>
+- static constexpr _MaskMember<_Tp> _S_isless(_SimdWrapper<_Tp, _Np> __x,
+- _SimdWrapper<_Tp, _Np> __y)
++ static constexpr _MaskMember<_Tp>
++ _S_isless(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
+ {
+ const auto __xi = __to_intrin(__x);
+ const auto __yi = __to_intrin(__y);
+@@ -3458,6 +3516,76 @@ template <typename _Abi, typename>
+ }
+
+ //}}} }}}
++ template <template <typename> class _Op, typename _Tp, typename _K, size_t _Np>
++ _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
++ _S_masked_unary(const _SimdWrapper<_K, _Np> __k, const _SimdWrapper<_Tp, _Np> __v)
++ {
++ if (__k._M_is_constprop_none_of())
++ return __v;
++ else if (__k._M_is_constprop_all_of())
++ {
++ auto __vv = _Base::_M_make_simd(__v);
++ _Op<decltype(__vv)> __op;
++ return __data(__op(__vv));
++ }
++ else if constexpr (__is_bitmask_v<decltype(__k)>
++ && (is_same_v<_Op<void>, __increment<void>>
++ || is_same_v<_Op<void>, __decrement<void>>))
++ {
++ // optimize masked unary increment and decrement as masked sub +/-1
++ constexpr int __pm_one
++ = is_same_v<_Op<void>, __increment<void>> ? -1 : 1;
++#ifdef __clang__
++ return __movm<_Np, _Tp>(__k._M_data) ? __v._M_data - __pm_one : __v._M_data;
++#else // __clang__
++ if constexpr (is_integral_v<_Tp>)
++ {
++ constexpr bool __lp64 = sizeof(long) == sizeof(long long);
++ using _Ip = std::make_signed_t<_Tp>;
++ using _Up = std::conditional_t<
++ std::is_same_v<_Ip, long>,
++ std::conditional_t<__lp64, long long, int>,
++ std::conditional_t<
++ std::is_same_v<_Ip, signed char>, char, _Ip>>;
++ const auto __value = __vector_bitcast<_Up>(__v._M_data);
++#define _GLIBCXX_SIMD_MASK_SUB(_Sizeof, _Width, _Instr) \
++ if constexpr (sizeof(_Tp) == _Sizeof && sizeof(__v) == _Width) \
++ return __vector_bitcast<_Tp>(__builtin_ia32_##_Instr##_mask(__value, \
++ __vector_broadcast<_Np>(_Up(__pm_one)), __value, __k._M_data))
++ _GLIBCXX_SIMD_MASK_SUB(1, 64, psubb512);
++ _GLIBCXX_SIMD_MASK_SUB(1, 32, psubb256);
++ _GLIBCXX_SIMD_MASK_SUB(1, 16, psubb128);
++ _GLIBCXX_SIMD_MASK_SUB(2, 64, psubw512);
++ _GLIBCXX_SIMD_MASK_SUB(2, 32, psubw256);
++ _GLIBCXX_SIMD_MASK_SUB(2, 16, psubw128);
++ _GLIBCXX_SIMD_MASK_SUB(4, 64, psubd512);
++ _GLIBCXX_SIMD_MASK_SUB(4, 32, psubd256);
++ _GLIBCXX_SIMD_MASK_SUB(4, 16, psubd128);
++ _GLIBCXX_SIMD_MASK_SUB(8, 64, psubq512);
++ _GLIBCXX_SIMD_MASK_SUB(8, 32, psubq256);
++ _GLIBCXX_SIMD_MASK_SUB(8, 16, psubq128);
++#undef _GLIBCXX_SIMD_MASK_SUB
++ }
++ else
++ {
++#define _GLIBCXX_SIMD_MASK_SUB(_Sizeof, _Width, _Instr) \
++ if constexpr (sizeof(_Tp) == _Sizeof && sizeof(__v) == _Width) \
++ return __builtin_ia32_##_Instr##_mask( \
++ __v._M_data, __vector_broadcast<_Np>(_Tp(__pm_one)), __v._M_data, \
++ __k._M_data, _MM_FROUND_CUR_DIRECTION)
++ _GLIBCXX_SIMD_MASK_SUB(4, 64, subps512);
++ _GLIBCXX_SIMD_MASK_SUB(4, 32, subps256);
++ _GLIBCXX_SIMD_MASK_SUB(4, 16, subps128);
++ _GLIBCXX_SIMD_MASK_SUB(8, 64, subpd512);
++ _GLIBCXX_SIMD_MASK_SUB(8, 32, subpd256);
++ _GLIBCXX_SIMD_MASK_SUB(8, 16, subpd128);
++#undef _GLIBCXX_SIMD_MASK_SUB
++ }
++#endif // __clang__
++ }
++ else
++ return _Base::template _S_masked_unary<_Op>(__k, __v);
++ }
+ };
+
+ // }}}
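The _S_masked_unary overload added above special-cases masked increment and decrement as a single masked subtract of +/-1 (the _GLIBCXX_SIMD_MASK_SUB expansions). In scalar terms the rewrite amounts to the following; the helper is hypothetical and only illustrates the transformation:

  #include <cstddef>
  #include <cstdint>

  // Masked increment as "subtract -1 where the mask bit is set": the same
  // __pm_one trick fed into the masked psub*/sub* builtins above. Lanes
  // whose mask bit is clear keep their old value.
  template <typename T, std::size_t N>
  void masked_increment(std::uint64_t k, T (&v)[N])
  {
    const T pm_one = T(-1); // increment == subtract -1; use T(1) for decrement
    for (std::size_t i = 0; i < N; ++i)
      if ((k >> i) & 1)
        v[i] = v[i] - pm_one;
  }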
+@@ -3471,8 +3599,8 @@ struct _MaskImplX86Mixin
+
+ // _S_to_maskvector(bool) {{{
+ template <typename _Up, size_t _ToN = 1, typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static constexpr enable_if_t<
+- is_same_v<_Tp, bool>, _SimdWrapper<_Up, _ToN>>
++ _GLIBCXX_SIMD_INTRINSIC static constexpr
++ enable_if_t<is_same_v<_Tp, bool>, _SimdWrapper<_Up, _ToN>>
+ _S_to_maskvector(_Tp __x)
+ {
+ static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
+@@ -3482,8 +3610,7 @@ struct _MaskImplX86Mixin
+
+ // }}}
+ // _S_to_maskvector(_SanitizedBitMask) {{{
+- template <typename _Up, size_t _UpN = 0, size_t _Np,
+- size_t _ToN = _UpN == 0 ? _Np : _UpN>
++ template <typename _Up, size_t _UpN = 0, size_t _Np, size_t _ToN = _UpN == 0 ? _Np : _UpN>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
+ _S_to_maskvector(_SanitizedBitMask<_Np> __x)
+ {
+@@ -3495,7 +3622,7 @@ struct _MaskImplX86Mixin
+ return _S_to_maskvector<_Up, _ToN>(__k);
+ else if (__x._M_is_constprop() || __builtin_is_constant_evaluated())
+ return __generate_from_n_evaluations<std::min(_ToN, _Np), _UV>(
+- [&](auto __i) -> _Up { return -__x[__i.value]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Up { return -__x[__i.value]; });
+ else if constexpr (sizeof(_Up) == 1)
+ {
+ if constexpr (sizeof(_UI) == 16)
+@@ -3740,9 +3867,9 @@ struct _MaskImplX86Mixin
+ else if constexpr (__bits_per_element >= _ToN)
+ {
+ constexpr auto __bitmask
+- = __generate_vector<_V>([](auto __i) constexpr->_UpUInt {
+- return __i < _ToN ? 1ull << __i : 0;
+- });
++ = __generate_vector<_V>([](auto __i)
++ constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _UpUInt
++ { return __i < _ToN ? 1ull << __i : 0; });
+ const auto __bits
+ = __vector_broadcast<_ToN, _UpUInt>(__k) & __bitmask;
+ if constexpr (__bits_per_element > _ToN)
+@@ -3753,11 +3880,11 @@ struct _MaskImplX86Mixin
+ else
+ {
+ const _V __tmp
+- = __generate_vector<_V>([&](auto __i) constexpr {
++ = __generate_vector<_V>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_UpUInt>(
+ __k >> (__bits_per_element * (__i / __bits_per_element)));
+ })
+- & __generate_vector<_V>([](auto __i) constexpr {
++ & __generate_vector<_V>([](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ return static_cast<_UpUInt>(1ull
+ << (__i % __bits_per_element));
+ }); // mask bit index
+@@ -3793,7 +3920,7 @@ struct _MaskImplX86Mixin
+ const auto __y = __vector_bitcast<__int_for_sizeof_t<_Tp>>(__x);
+ return __generate_from_n_evaluations<std::min(_ToN, _Np),
+ __vector_type_t<_Up, _ToN>>(
+- [&](auto __i) -> _Up { return __y[__i.value]; });
++ [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Up { return __y[__i.value]; });
+ }
+ using _To = __vector_type_t<_Up, _ToN>;
+ [[maybe_unused]] constexpr size_t _FromN = _Np;
+@@ -4128,8 +4255,11 @@ struct _MaskImplX86Mixin
+ {
+ const auto __bools = -__x._M_data;
+ const _ULLong __k = __call_with_n_evaluations<_Np>(
+- [](auto... __bits) { return (__bits | ...); },
+- [&](auto __i) { return _ULLong(__bools[+__i]) << __i; });
++ [](auto... __bits) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return (__bits | ...);
++ }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
++ return _ULLong(__bools[+__i]) << __i;
++ });
+ if (__builtin_is_constant_evaluated()
+ || __builtin_constant_p(__k))
+ return __k;
+@@ -4283,15 +4413,28 @@ template <typename _Abi, typename>
+ _S_load(const bool* __mem)
+ {
+ static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
+- if constexpr (__have_avx512bw)
++ if (__builtin_is_constant_evaluated())
+ {
+- const auto __to_vec_or_bits = [](auto __bits) -> decltype(auto) {
+- if constexpr (__is_avx512_abi<_Abi>())
+- return __bits;
+- else
+- return _S_to_maskvector<_Tp>(
+- _BitMask<_S_size<_Tp>>(__bits)._M_sanitized());
+- };
++ if constexpr (__is_avx512_abi<_Abi>())
++ {
++ _MaskMember<_Tp> __r{};
++ for (size_t __i = 0; __i < _S_size<_Tp>; ++__i)
++ __r._M_data |= _ULLong(__mem[__i]) << __i;
++ return __r;
++ }
++ else
++ return _Base::template _S_load<_Tp>(__mem);
++ }
++ else if constexpr (__have_avx512bw)
++ {
++ const auto __to_vec_or_bits
++ = [](auto __bits) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> decltype(auto) {
++ if constexpr (__is_avx512_abi<_Abi>())
++ return __bits;
++ else
++ return _S_to_maskvector<_Tp>(
++ _BitMask<_S_size<_Tp>>(__bits)._M_sanitized());
++ };
+
+ if constexpr (_S_size<_Tp> <= 16 && __have_avx512vl)
+ {
+@@ -4478,7 +4621,7 @@ template <typename _Abi, typename>
+ }
+ else
+ {
+- _BitOps::_S_bit_iteration(__mask, [&](auto __i) {
++ _BitOps::_S_bit_iteration(__mask, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ __merge._M_set(__i, __mem[__i]);
+ });
+ return __merge;
+@@ -4550,14 +4693,16 @@ template <typename _Abi, typename>
+
+ // _S_store {{{2
+ template <typename _Tp, size_t _Np>
+- _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __v,
+- bool* __mem) noexcept
++ _GLIBCXX_SIMD_INTRINSIC static constexpr void
++ _S_store(_SimdWrapper<_Tp, _Np> __v, bool* __mem) noexcept
+ {
+- if constexpr (__is_avx512_abi<_Abi>())
++ if (__builtin_is_constant_evaluated())
++ _Base::_S_store(__v, __mem);
++ else if constexpr (__is_avx512_abi<_Abi>())
+ {
+ if constexpr (__have_avx512bw_vl)
+ _CommonImplX86::_S_store<_Np>(
+- __vector_bitcast<char>([](auto __data) {
++ __vector_bitcast<char>([](auto __data) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
+ if constexpr (_Np <= 16)
+ return _mm_maskz_set1_epi8(__data, 1);
+ else if constexpr (_Np <= 32)
+@@ -4635,7 +4780,7 @@ template <typename _Abi, typename>
+ if constexpr (_Np <= 4 && sizeof(_Tp) == 8)
+ {
+ auto __k = __intrin_bitcast<__m256i>(__to_intrin(__v));
+- int __bool4;
++ int __bool4{};
+ if constexpr (__have_avx2)
+ __bool4 = _mm256_movemask_epi8(__k);
+ else
+@@ -4715,12 +4860,13 @@ template <typename _Abi, typename>
+ // logical and bitwise operators {{{2
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data & __y._M_data;
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kand_mask8(__x._M_data, __y._M_data);
+ else if constexpr (_Np <= 16)
+ return _kand_mask16(__x._M_data, __y._M_data);
+@@ -4737,12 +4883,13 @@ template <typename _Abi, typename>
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data | __y._M_data;
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kor_mask8(__x._M_data, __y._M_data);
+ else if constexpr (_Np <= 16)
+ return _kor_mask16(__x._M_data, __y._M_data);
+@@ -4763,7 +4910,9 @@ template <typename _Abi, typename>
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data ^ _Abi::template __implicit_mask_n<_Np>();
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kandn_mask8(__x._M_data,
+ _Abi::template __implicit_mask_n<_Np>());
+ else if constexpr (_Np <= 16)
+@@ -4784,12 +4933,13 @@ template <typename _Abi, typename>
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data & __y._M_data;
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kand_mask8(__x._M_data, __y._M_data);
+ else if constexpr (_Np <= 16)
+ return _kand_mask16(__x._M_data, __y._M_data);
+@@ -4806,12 +4956,13 @@ template <typename _Abi, typename>
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data | __y._M_data;
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kor_mask8(__x._M_data, __y._M_data);
+ else if constexpr (_Np <= 16)
+ return _kor_mask16(__x._M_data, __y._M_data);
+@@ -4828,12 +4979,13 @@ template <typename _Abi, typename>
+
+ template <typename _Tp, size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
+- _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
+- const _SimdWrapper<_Tp, _Np>& __y)
++ _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x, const _SimdWrapper<_Tp, _Np>& __y)
+ {
+ if constexpr (is_same_v<_Tp, bool>)
+ {
+- if constexpr (__have_avx512dq && _Np <= 8)
++ if (__builtin_is_constant_evaluated())
++ return __x._M_data ^ __y._M_data;
++ else if constexpr (__have_avx512dq && _Np <= 8)
+ return _kxor_mask8(__x._M_data, __y._M_data);
+ else if constexpr (_Np <= 16)
+ return _kxor_mask16(__x._M_data, __y._M_data);
+@@ -4853,8 +5005,7 @@ template <typename _Abi, typename>
+ template <size_t _Np>
+ _GLIBCXX_SIMD_INTRINSIC static void
+ _S_masked_assign(_SimdWrapper<bool, _Np> __k,
+- _SimdWrapper<bool, _Np>& __lhs,
+- _SimdWrapper<bool, _Np> __rhs)
++ _SimdWrapper<bool, _Np>& __lhs, _SimdWrapper<bool, _Np> __rhs)
+ {
+ __lhs._M_data
+ = (~__k._M_data & __lhs._M_data) | (__k._M_data & __rhs._M_data);
+@@ -4876,7 +5027,8 @@ template <typename _Abi, typename>
+ //}}}
+ // _S_all_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_all_of(simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
+ {
+@@ -4932,7 +5084,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_any_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_any_of(simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
+ {
+@@ -4967,7 +5120,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_none_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_none_of(simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
+ {
+@@ -5002,7 +5156,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_some_of {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static bool _S_some_of(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static bool
++ _S_some_of(simd_mask<_Tp, _Abi> __k)
+ {
+ if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
+ {
+@@ -5043,7 +5198,8 @@ template <typename _Abi, typename>
+ // }}}
+ // _S_popcount {{{
+ template <typename _Tp>
+- _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
++ _GLIBCXX_SIMD_INTRINSIC static int
++ _S_popcount(simd_mask<_Tp, _Abi> __k)
+ {
+ constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
+ const auto __kk = _Abi::_S_masked(__k._M_data)._M_data;
+--- a/src/libstdc++-v3/include/std/array
++++ b/src/libstdc++-v3/include/std/array
+@@ -446,7 +446,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+ static_assert(!is_array_v<_Tp>);
+ static_assert(is_constructible_v<_Tp, _Tp&>);
+ if constexpr (is_constructible_v<_Tp, _Tp&>)
+- return __to_array(__a, make_index_sequence<_Nm>{});
++ return std::__to_array(__a, make_index_sequence<_Nm>{});
+ __builtin_unreachable(); // FIXME: see PR c++/91388
+ }
+
+@@ -459,7 +459,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+ static_assert(!is_array_v<_Tp>);
+ static_assert(is_move_constructible_v<_Tp>);
+ if constexpr (is_move_constructible_v<_Tp>)
+- return __to_array<1>(__a, make_index_sequence<_Nm>{});
++ return std::__to_array<1>(__a, make_index_sequence<_Nm>{});
+ __builtin_unreachable(); // FIXME: see PR c++/91388
+ }
+ #endif // C++20
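The two hunks above qualify the internal call as std::__to_array so that no argument-dependent lookup is performed; an unqualified call can force instantiation of incomplete associated class templates, which is what the new 23_containers/array/creation/111512.cc test later in this patch exercises. A reduced, standalone illustration of the difference (not patch code):

  struct incomplete;

  template <typename T>
  struct holder
  { T t; }; // instantiating this with an incomplete T is ill-formed

  template <typename T>
  void use(T*) {}

  int main()
  {
    holder<incomplete>* p = nullptr;
    ::use(p);  // qualified call: no ADL, holder<incomplete> is never instantiated
    // use(p); // unqualified: ADL may instantiate holder<incomplete> and fail
  }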
+--- a/src/libstdc++-v3/include/std/charconv
++++ b/src/libstdc++-v3/include/std/charconv
+@@ -236,7 +236,7 @@ namespace __detail
+ static_assert(is_unsigned<_Tp>::value, "implementation bug");
+
+ to_chars_result __res;
+- unsigned __len;
++ unsigned __len = 0;
+
+ if _GLIBCXX17_CONSTEXPR (__gnu_cxx::__int_traits<_Tp>::__digits <= 16)
+ {
+--- a/src/libstdc++-v3/python/libstdcxx/v6/printers.py
++++ b/src/libstdc++-v3/python/libstdcxx/v6/printers.py
+@@ -1216,9 +1216,34 @@ class StdExpAnyPrinter(SingleObjContainerPrinter):
+ mgrname = m.group(1)
+ # FIXME need to expand 'std::string' so that gdb.lookup_type works
+ if 'std::string' in mgrname:
+- mgrname = re.sub("std::string(?!\w)", str(gdb.lookup_type('std::string').strip_typedefs()), m.group(1))
+-
+- mgrtype = gdb.lookup_type(mgrname)
++ # This lookup for std::string might return the __cxx11 version,
++ # but that's not necessarily the one used by the std::any
++ # manager function we're trying to find.
++ strings = {str(gdb.lookup_type('std::string').strip_typedefs())}
++ # So also consider all the other possible std::string types!
++ s = 'basic_string<char, std::char_traits<char>, std::allocator<char> >'
++ quals = ['std::', 'std::__cxx11::', 'std::' + _versioned_namespace]
++ strings |= {q+s for q in quals} # set of unique strings
++ mgrtypes = []
++ for s in strings:
++ try:
++ x = re.sub("std::string(?!\w)", s, m.group(1))
++ # The following lookup might raise gdb.error if the
++ # manager function was never instantiated for 's' in the
++ # program, because there will be no such type.
++ mgrtypes.append(gdb.lookup_type(x))
++ except gdb.error:
++ pass
++ if len(mgrtypes) != 1:
++ # FIXME: this is unlikely in practice, but possible for
++ # programs that use both old and new string types with
++ # std::any in a single program. Can we do better?
++ # Maybe find the address of each type's _S_manage and
++ # compare to the address stored in _M_manager?
++ raise ValueError('Cannot uniquely determine std::string type used in std::any')
++ mgrtype = mgrtypes[0]
++ else:
++ mgrtype = gdb.lookup_type(mgrname)
+ self.contained_type = mgrtype.template_argument(0)
+ valptr = None
+ if '::_Manager_internal' in mgrname:
+--- a/src/libstdc++-v3/src/c++17/memory_resource.cc
++++ b/src/libstdc++-v3/src/c++17/memory_resource.cc
+@@ -506,7 +506,7 @@ namespace pmr
+ }
+
+ // Allocated size of chunk:
+- uint32_t _M_bytes = 0;
++ bitset::size_type _M_bytes = 0;
+ // Start of allocated chunk:
+ std::byte* _M_p = nullptr;
+
+@@ -580,7 +580,7 @@ namespace pmr
+ // For 16-bit pointers it's five pointers (10 bytes).
+ // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
+ static_assert(sizeof(chunk)
+- == sizeof(bitset::size_type) + sizeof(uint32_t) + 2 * sizeof(void*));
++ == 2 * sizeof(bitset::size_type) + 2 * sizeof(void*));
+
+ // An oversized allocation that doesn't fit in a pool.
+ struct big_block
+@@ -735,7 +735,7 @@ namespace pmr
+ _M_blocks_per_chunk = std::min({
+ max_blocks,
+ __opts.max_blocks_per_chunk,
+- (size_t)_M_blocks_per_chunk * 2
++ size_t(_M_blocks_per_chunk * 2)
+ });
+ }
+ }
+@@ -1058,7 +1058,8 @@ namespace pmr
+ // Decide on initial number of blocks per chunk.
+ // At least 16 blocks per chunk seems reasonable,
+ // more for smaller blocks:
+- size_t blocks_per_chunk = std::max(size_t(16), 1024 / block_size);
++ size_t blocks_per_chunk = 1024 / block_size;
++ blocks_per_chunk = std::max(size_t(16), blocks_per_chunk);
+ // But don't exceed the requested max_blocks_per_chunk:
+ blocks_per_chunk
+ = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
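The last hunk above restructures the initial blocks-per-chunk computation without changing the policy: start from 1024 / block_size, raise it to at least 16, and cap it at the configured max_blocks_per_chunk. As a standalone sketch (the function name is hypothetical; the 16 and 1024 constants come from the code above):

  #include <algorithm>
  #include <cstddef>

  // Initial number of blocks per chunk for a pool of block_size-byte blocks.
  std::size_t initial_blocks_per_chunk(std::size_t block_size,
                                       std::size_t max_blocks_per_chunk)
  {
    std::size_t blocks = 1024 / block_size;        // more blocks for small sizes
    blocks = std::max(std::size_t(16), blocks);    // but at least 16 per chunk
    return std::min(blocks, max_blocks_per_chunk); // and never above the cap
  }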
+--- a/src/libstdc++-v3/src/filesystem/ops-common.h
++++ b/src/libstdc++-v3/src/filesystem/ops-common.h
+@@ -84,7 +84,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+ inline error_code
+ __unsupported() noexcept
+ {
+-#if defined ENOTSUP
++#if defined __AVR__
++ // avr-libc defines ENOTSUP and EOPNOTSUPP but with nonsense values.
++ // ENOSYS is defined though, so use an error_code corresponding to that.
++ // This contradicts the comment above, but we don't have much choice.
++ return std::make_error_code(std::errc::function_not_supported);
++#elif defined ENOTSUP
+ return std::make_error_code(std::errc::not_supported);
+ #elif defined EOPNOTSUPP
+ // This is supposed to be for socket operations
+@@ -452,25 +457,26 @@ _GLIBCXX_BEGIN_NAMESPACE_FILESYSTEM
+ int fd;
+ };
+
+- int iflag = O_RDONLY;
++ int common_flags = 0;
++#ifdef O_CLOEXEC
++ common_flags |= O_CLOEXEC;
++#endif
+ #ifdef _GLIBCXX_FILESYSTEM_IS_WINDOWS
+- iflag |= O_BINARY;
++ common_flags |= O_BINARY;
+ #endif
+
++ const int iflag = O_RDONLY | common_flags;
+ CloseFD in = { posix::open(from, iflag) };
+ if (in.fd == -1)
+ {
+ ec.assign(errno, std::generic_category());
+ return false;
+ }
+- int oflag = O_WRONLY|O_CREAT;
++ int oflag = O_WRONLY | O_CREAT | common_flags;
+ if (options.overwrite || options.update)
+ oflag |= O_TRUNC;
+ else
+ oflag |= O_EXCL;
+-#ifdef _GLIBCXX_FILESYSTEM_IS_WINDOWS
+- oflag |= O_BINARY;
+-#endif
+ CloseFD out = { posix::open(to, oflag, S_IWUSR) };
+ if (out.fd == -1)
+ {
+@@ -495,25 +501,29 @@ _GLIBCXX_BEGIN_NAMESPACE_FILESYSTEM
+
+ size_t count = from_st->st_size;
+ #if defined _GLIBCXX_USE_SENDFILE && ! defined _GLIBCXX_FILESYSTEM_IS_WINDOWS
+- off_t offset = 0;
+- ssize_t n = ::sendfile(out.fd, in.fd, &offset, count);
+- if (n < 0 && errno != ENOSYS && errno != EINVAL)
+- {
+- ec.assign(errno, std::generic_category());
+- return false;
+- }
+- if ((size_t)n == count)
++ ssize_t n = 0;
++ if (count != 0)
+ {
+- if (!out.close() || !in.close())
++ off_t offset = 0;
++ n = ::sendfile(out.fd, in.fd, &offset, count);
++ if (n < 0 && errno != ENOSYS && errno != EINVAL)
+ {
+ ec.assign(errno, std::generic_category());
+ return false;
+ }
+- ec.clear();
+- return true;
++ if ((size_t)n == count)
++ {
++ if (!out.close() || !in.close())
++ {
++ ec.assign(errno, std::generic_category());
++ return false;
++ }
++ ec.clear();
++ return true;
++ }
++ else if (n > 0)
++ count -= n;
+ }
+- else if (n > 0)
+- count -= n;
+ #endif // _GLIBCXX_USE_SENDFILE
+
+ using std::ios;
+@@ -543,11 +553,17 @@ _GLIBCXX_BEGIN_NAMESPACE_FILESYSTEM
+ }
+ #endif
+
+- if (count && !(std::ostream(&sbout) << &sbin))
+- {
+- ec = std::make_error_code(std::errc::io_error);
+- return false;
+- }
++ // ostream::operator<<(streambuf*) fails if it extracts no characters,
++ // so don't try to use it for empty files. But from_st->st_size == 0 for
++ // some special files (e.g. procfs, see PR libstdc++/108178) so just try
++ // to read a character to decide whether there is anything to copy or not.
++ if (sbin.sgetc() != char_traits<char>::eof())
++ if (!(std::ostream(&sbout) << &sbin))
++ {
++ ec = std::make_error_code(std::errc::io_error);
++ return false;
++ }
++
+ if (!sbout.close() || !sbin.close())
+ {
+ ec.assign(errno, std::generic_category());
+@@ -620,7 +636,8 @@ _GLIBCXX_BEGIN_NAMESPACE_FILESYSTEM
+ {
+ buf.resize(len);
+ len = GetTempPathW(buf.size(), buf.data());
+- } while (len > buf.size());
++ }
++ while (len > buf.size());
+
+ if (len == 0)
+ ec = __last_system_error();
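The copy_file changes above avoid two failure modes for empty sources: sendfile is only attempted when there is something to copy, and the streambuf insertion is skipped when a peek shows the source is already at end-of-file, because ostream::operator<<(streambuf*) reports failure if it extracts no characters. The guarded fallback, reduced to a sketch (copy_rest is a hypothetical helper, not the patch's code):

  #include <fstream>
  #include <ostream>
  #include <string>

  // Copy whatever remains in `in` to `out`, treating an empty remainder as
  // success instead of letting operator<<(streambuf*) report failure.
  bool copy_rest(std::filebuf& in, std::filebuf& out)
  {
    if (in.sgetc() == std::char_traits<char>::eof())
      return true;                       // nothing left (e.g. empty /proc file)
    std::ostream os(&out);
    return static_cast<bool>(os << &in); // false means a real I/O error
  }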
+--- a/src/libstdc++-v3/testsuite/20_util/from_chars/4.cc
++++ b/src/libstdc++-v3/testsuite/20_util/from_chars/4.cc
+@@ -18,6 +18,7 @@
+ // <charconv> is supported in C++14 as a GNU extension
+ // { dg-do run { target c++14 } }
+ // { dg-add-options ieee }
++// { dg-additional-options "-DSKIP_LONG_DOUBLE" { target aarch64-*-vxworks* x86_64-*-vxworks* } }
+
+ #include <charconv>
+ #include <string>
+@@ -354,7 +355,7 @@ test06()
+ {
+ test_max_mantissa<float, unsigned long>();
+ test_max_mantissa<double, unsigned long long>();
+-#ifdef __GLIBCXX_TYPE_INT_N_0
++#if defined __GLIBCXX_TYPE_INT_N_0 && !defined SKIP_LONG_DOUBLE
+ test_max_mantissa<long double, unsigned __GLIBCXX_TYPE_INT_N_0>();
+ #endif
+ }
+new file mode 100644
+--- /dev/null
++++ b/src/libstdc++-v3/testsuite/20_util/integer_sequence/pr111357.cc
+@@ -0,0 +1,34 @@
++// { dg-do compile { target c++14 } }
++
++// PR c++/111357 - __integer_pack fails to work with values of dependent type
++// convertible to integers in noexcept context
++
++#include <utility>
++
++using std::integer_sequence;
++using std::make_integer_sequence;
++
++template<int... V>
++void g(integer_sequence<int,V...>)
++{}
++
++template<typename ...T>
++struct c1
++{
++ static constexpr int value = 1;
++ constexpr operator int() { return value; }
++};
++
++template<typename T>
++struct R
++{
++ using S = make_integer_sequence<int,c1<T>{}>;
++
++ R() noexcept(noexcept(g(S()))) // { dg-bogus "argument to .__integer_pack." }
++ {}
++};
++
++int main()
++{
++ R<int>();
++}
+--- a/src/libstdc++-v3/testsuite/20_util/to_chars/long_double.cc
++++ b/src/libstdc++-v3/testsuite/20_util/to_chars/long_double.cc
+@@ -34,6 +34,10 @@
+ // more portable and robust to differences in system printf behavior.
+ // { dg-xfail-run-if "Non-conforming printf (see PR98384)" { *-*-solaris* *-*-darwin* } }
+
++// On systems that use double-precision from_chars for long double,
++// this is expected to fail.
++// { dg-xfail-run-if "from_chars limited to double-precision" { aarch64-*-vxworks* i*86-*-vxworks* x86_64-*-vxworks* } }
++
+ // { dg-require-effective-target ieee_floats }
+ // { dg-require-effective-target size32plus }
+
+new file mode 100644
+--- /dev/null
++++ b/src/libstdc++-v3/testsuite/23_containers/array/creation/111512.cc
+@@ -0,0 +1,25 @@
++// { dg-options "-std=gnu++20" }
++// { dg-do compile { target c++20 } }
++
++// Bug libstdc++/111511 - Incorrect ADL in std::to_array in GCC 11/12/13
++// Bug c++/111512 - GCC's __builtin_memcpy can trigger ADL
++
++#include <array>
++#include <utility>
++
++struct incomplete;
++
++template<class T>
++struct holder {
++ T t; // { dg-bogus "'holder<T>::t' has incomplete type" }
++};
++
++// A complete type that cannot be used as an associated type for ADL.
++using adl_bomb = holder<incomplete>*;
++
++int main()
++{
++ adl_bomb a[1]{};
++ (void) std::to_array(a);
++ (void) std::to_array(std::move(a));
++}
+new file mode 100644
+--- /dev/null
++++ b/src/libstdc++-v3/testsuite/27_io/filesystem/operations/copy_file_108178.cc
+@@ -0,0 +1,33 @@
++// { dg-do run { target c++17 } }
++// { dg-require-filesystem-ts "" }
++
++// C++17 30.10.15.4 Copy [fs.op.copy_file]
++
++#include <filesystem>
++#include <fstream>
++#include <unistd.h> // getpid
++#include <testsuite_fs.h>
++#include <testsuite_hooks.h>
++
++namespace fs = std::filesystem;
++
++void
++test_procfs() // PR libstdc++/108178
++{
++ auto pid = ::getpid();
++ std::string from = "/proc/" + std::to_string(pid) + "/status";
++ if (fs::exists(from))
++ {
++ auto to = __gnu_test::nonexistent_path();
++ fs::copy_file(from, to);
++ std::ifstream f(to);
++ VERIFY(f.is_open());
++ VERIFY(f.peek() != std::char_traits<char>::eof());
++ fs::remove(to);
++ }
++}
++
++int main()
++{
++ test_procfs();
++}
+--- a/src/libstdc++-v3/testsuite/27_io/filesystem/path/108636.cc
++++ b/src/libstdc++-v3/testsuite/27_io/filesystem/path/108636.cc
+@@ -1,5 +1,6 @@
+ // { dg-do link { target c++17 } }
+ // { dg-options "-fkeep-inline-functions" }
++// { dg-require-filesystem-ts "" }
+
+ #include <filesystem>
+ int main()
+--- a/src/libstdc++-v3/testsuite/27_io/filesystem/path/construct/95048.cc
++++ b/src/libstdc++-v3/testsuite/27_io/filesystem/path/construct/95048.cc
+@@ -16,6 +16,8 @@ test_wide()
+ VERIFY( CHECK(L, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(L, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(L, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").wstring() == L"\U0001D11E" ); // G Clef
+ }
+
+ void
+@@ -25,6 +27,8 @@ test_u16()
+ VERIFY( CHECK(u, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(u, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(u, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").u16string() == u"\U0001D11E" ); // G Clef
+ }
+
+ void
+@@ -34,6 +38,8 @@ test_u32()
+ VERIFY( CHECK(U, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(U, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(U, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").u32string() == U"\U0001D11E" ); // G Clef
+ }
+
+ int
+--- a/src/libstdc++-v3/testsuite/experimental/filesystem/path/construct/95048.cc
++++ b/src/libstdc++-v3/testsuite/experimental/filesystem/path/construct/95048.cc
+@@ -18,6 +18,8 @@ test_wide()
+ VERIFY( CHECK(L, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(L, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(L, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").wstring() == L"\U0001D11E" ); // G Clef
+ }
+
+ void
+@@ -27,6 +29,8 @@ test_u16()
+ VERIFY( CHECK(u, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(u, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(u, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").u16string() == u"\U0001D11E" ); // G Clef
+ }
+
+ void
+@@ -36,6 +40,8 @@ test_u32()
+ VERIFY( CHECK(U, "\U0001F4C1") ); // folder
+ VERIFY( CHECK(U, "\U0001F4C2") ); // open folder
+ VERIFY( CHECK(U, "\U0001F4C4") ); // filing cabient
++
++ VERIFY( path(u8"\U0001D11E").u32string() == U"\U0001D11E" ); // G Clef
+ }
+
+ int
+new file mode 100644
+--- /dev/null
++++ b/src/libstdc++-v3/testsuite/experimental/simd/pr109261_constexpr_simd.cc
+@@ -0,0 +1,92 @@
++// { dg-options "-std=gnu++17" }
++// { dg-do compile { target c++17 } }
++// { dg-require-cmath "" }
++
++#include <experimental/simd>
++
++namespace stdx = std::experimental;
++
++template <typename T, typename V>
++ void
++ test01()
++ {
++ constexpr T data[V::size()] = {};
++ constexpr auto a = V(data, stdx::element_aligned);
++
++ constexpr auto b = []() constexpr {
++ V x = T(1);
++ where(x > T(), x) = T();
++ where(x < T(), x) += T();
++ where(x >= T(), x) -= T();
++ where(x <= T(), x) *= T();
++ where(x == T(), x) /= T(1);
++ where(x != T(), x) += T(1);
++ return x;
++ }();
++
++ constexpr T c = V()[0];
++
++ constexpr auto d = !V() && !!V() || !V() & !V() | !V() ^ !V();
++
++ constexpr auto e = []() constexpr {
++ T data[V::size()] = {};
++ V(T(1)).copy_to(data, stdx::element_aligned);
++ V x = T();
++ x[0] = T(1);
++ x.copy_from(data, stdx::element_aligned);
++ bool mask[V::size()] = {};
++ auto k = hmin(x + x - x * x) == x / x;
++ k.copy_to(mask, stdx::element_aligned);
++ mask[0] = false;
++ using M = typename V::mask_type;
++ return M(mask, stdx::element_aligned);
++ }();
++
++ static_assert(not e[0]);
++ static_assert(popcount(e) == V::size() - 1);
++
++ static_assert(all_of(V(T(1)) == []() constexpr {
++ float data[V::size()] = {};
++ V(T(1)).copy_to(data, stdx::element_aligned);
++ V x = T();
++ x.copy_from(data, stdx::element_aligned);
++ return x;
++ }()));
++
++ static_assert(hmin(V()) == T());
++ static_assert(hmax(V()) == T());
++ static_assert(reduce(V(1)) == T(V::size()));
++ }
++
++template <typename T>
++ void
++ iterate_abis()
++ {
++ test01<T, stdx::simd<T, stdx::simd_abi::scalar>>();
++ test01<T, stdx::simd<T>>();
++ test01<T, stdx::native_simd<T>>();
++ test01<T, stdx::fixed_size_simd<T, 3>>();
++ test01<T, stdx::fixed_size_simd<T, stdx::simd_abi::max_fixed_size<T> - 4>>();
++ }
++
++int main()
++{
++ iterate_abis<char>();
++ iterate_abis<wchar_t>();
++ iterate_abis<char16_t>();
++ iterate_abis<char32_t>();
++
++ iterate_abis<signed char>();
++ iterate_abis<unsigned char>();
++ iterate_abis<short>();
++ iterate_abis<unsigned short>();
++ iterate_abis<int>();
++ iterate_abis<unsigned int>();
++ iterate_abis<long>();
++ iterate_abis<unsigned long>();
++ iterate_abis<long long>();
++ iterate_abis<unsigned long long>();
++ iterate_abis<float>();
++ iterate_abis<double>();
++ iterate_abis<long double>();
++}
+new file mode 100644
+--- /dev/null
++++ b/src/libstdc++-v3/testsuite/experimental/simd/pr109822_cast_functions.cc
+@@ -0,0 +1,63 @@
++// { dg-options "-std=gnu++17" }
++// { dg-do compile { target c++17 } }
++
++#include <experimental/simd>
++
++namespace stdx = std::experimental;
++
++template <typename T, typename V>
++ void
++ test01()
++ {
++ using M = typename V::mask_type;
++ [[maybe_unused]] auto x = to_fixed_size(V());
++ [[maybe_unused]] auto k = to_fixed_size(M());
++ if constexpr (stdx::simd<T>::size() == V::size())
++ {
++ [[maybe_unused]] auto xx = to_compatible(x);
++ [[maybe_unused]] auto kk = to_compatible(k);
++ x = to_fixed_size(xx);
++ k = to_fixed_size(kk);
++ }
++ if constexpr (stdx::native_simd<T>::size() == V::size())
++ {
++ [[maybe_unused]] auto xx = to_native(x);
++ [[maybe_unused]] auto kk = to_native(k);
++ x = to_fixed_size(xx);
++ k = to_fixed_size(kk);
++ }
++ }
++
++template <typename T>
++ void
++ iterate_abis()
++ {
++ test01<T, stdx::simd<T, stdx::simd_abi::scalar>>();
++ test01<T, stdx::simd<T>>();
++ test01<T, stdx::native_simd<T>>();
++ test01<T, stdx::fixed_size_simd<T, 3>>();
++ test01<T, stdx::fixed_size_simd<T, stdx::simd_abi::max_fixed_size<T> - 4>>();
++ }
++
++int
++main()
++{
++ iterate_abis<char>();
++ iterate_abis<wchar_t>();
++ iterate_abis<char16_t>();
++ iterate_abis<char32_t>();
++
++ iterate_abis<signed char>();
++ iterate_abis<unsigned char>();
++ iterate_abis<short>();
++ iterate_abis<unsigned short>();
++ iterate_abis<int>();
++ iterate_abis<unsigned int>();
++ iterate_abis<long>();
++ iterate_abis<unsigned long>();
++ iterate_abis<long long>();
++ iterate_abis<unsigned long long>();
++ iterate_abis<float>();
++ iterate_abis<double>();
++ iterate_abis<long double>();
++}
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/fpclassify.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/fpclassify.cc
+@@ -40,9 +40,11 @@ template <typename V>
+ {
+ using T = typename V::value_type;
+ using intv = std::experimental::fixed_size_simd<int, V::size()>;
++#if __GCC_IEC_559 >= 2
+ constexpr T inf = std::__infinity_v<T>;
+ constexpr T denorm_min = std::__infinity_v<T>;
+ constexpr T nan = std::__quiet_NaN_v<T>;
++#endif
+ constexpr T max = std::__finite_max_v<T>;
+ constexpr T norm_min = std::__norm_min_v<T>;
+ test_values<V>(
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/frexp.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/frexp.cc
+@@ -27,11 +27,17 @@ template <typename V>
+ {
+ using int_v = std::experimental::fixed_size_simd<int, V::size()>;
+ using T = typename V::value_type;
++#if __GCC_IEC_559 >= 2 || defined __STDC_IEC_559__
+ constexpr auto denorm_min = std::__denorm_min_v<T>;
++#endif
++#if __GCC_IEC_559 >= 2
+ constexpr auto norm_min = std::__norm_min_v<T>;
++#endif
+ constexpr auto max = std::__finite_max_v<T>;
++#if defined __STDC_IEC_559__
+ constexpr auto nan = std::__quiet_NaN_v<T>;
+ constexpr auto inf = std::__infinity_v<T>;
++#endif
+ test_values<V>(
+ {0, 0.25, 0.5, 1, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 32, 31, -0., -0.25, -0.5, -1,
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/integer_operators.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/integer_operators.cc
+@@ -180,11 +180,10 @@ template <typename V>
+ for (int j = 0; j < 100; ++j)
+ {
+ const V seq([&](auto i) -> T { return (j + i) % n_promo_bits; });
+- COMPARE(V(1) >> seq, V([&](auto i) { return T(T(1) >> seq[i]); }))
+- << "seq = " << seq;
+- COMPARE(make_value_unknown(V(1)) >> make_value_unknown(seq),
+- V([&](auto i) { return T(T(1) >> seq[i]); }))
+- << "seq = " << seq;
++ const V expect([&](auto i) { return seq[i] == 0 ? T(1) : T(0); });
++ COMPARE(V(1) >> seq, expect) << "\nseq = " << seq;
++ COMPARE(make_value_unknown(V(1)) >> make_value_unknown(seq), expect)
++ << "\nseq = " << seq;
+ }
+ for_constexpr<int, 0, n_promo_bits - 1>([](auto shift_ic) {
+ constexpr int shift = shift_ic;
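The rewritten expectation in the integer_operators.cc hunk above is numerically the same as before: for in-range shift counts, shifting the value 1 right yields 1 only for a zero shift and 0 otherwise, and spelling that out keeps the reference value independent of the operator under test. In isolation (trivial check, not part of the testsuite):

  // For in-range shift counts, 1 >> s is 1 only when s == 0.
  constexpr bool shifted_one_matches(int s)
  { return (1 >> s) == (s == 0 ? 1 : 0); }

  static_assert(shifted_one_matches(0) && shifted_one_matches(1)
                && shifted_one_matches(7));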
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/ldexp_scalbn_scalbln_modf.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/ldexp_scalbn_scalbln_modf.cc
+@@ -139,7 +139,6 @@ template <typename V>
+ if (modf_is_broken)
+ return;
+ V integral = {};
+- const V totest = modf(input, &integral);
+ auto&& expected = [&](const auto& v) -> std::pair<const V, const V> {
+ std::pair<V, V> tmp = {};
+ using std::modf;
+@@ -151,8 +150,9 @@ template <typename V>
+ }
+ return tmp;
+ };
+- const auto expect1 = expected(input);
+ #ifdef __STDC_IEC_559__
++ const V totest = modf(input, &integral);
++ const auto expect1 = expected(input);
+ COMPARE(isnan(totest), isnan(expect1.first))
+ << "modf(" << input << ", iptr) = " << totest << " != " << expect1;
+ COMPARE(isnan(integral), isnan(expect1.second))
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/logarithm.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/logarithm.cc
+@@ -30,11 +30,13 @@ template <typename V>
+ vir::test::setFuzzyness<double>(1);
+
+ using T = typename V::value_type;
++#ifdef __STDC_IEC_559__
+ constexpr T nan = std::__quiet_NaN_v<T>;
+ constexpr T inf = std::__infinity_v<T>;
+ constexpr T denorm_min = std::__denorm_min_v<T>;
+- constexpr T norm_min = std::__norm_min_v<T>;
+ constexpr T min = std::__finite_min_v<T>;
++#endif
++ constexpr T norm_min = std::__norm_min_v<T>;
+ constexpr T max = std::__finite_max_v<T>;
+ test_values<V>({1,
+ 2,
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/operator_cvt.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/operator_cvt.cc
+@@ -220,8 +220,6 @@ template <typename V>
+ binary_op_return_type<vldouble, short>();
+ binary_op_return_type<vldouble, ushort>();
+ binary_op_return_type<vldouble, uint>();
+- binary_op_return_type<vldouble, long>();
+- binary_op_return_type<vldouble, ulong>();
+ binary_op_return_type<vldouble, float>();
+ binary_op_return_type<vldouble, double>();
+
+@@ -231,8 +229,6 @@ template <typename V>
+ binary_op_return_type<vf64<long double>, ushort>();
+ binary_op_return_type<vf64<long double>, int>();
+ binary_op_return_type<vf64<long double>, uint>();
+- binary_op_return_type<vf64<long double>, long>();
+- binary_op_return_type<vf64<long double>, ulong>();
+ binary_op_return_type<vf64<long double>, float>();
+ binary_op_return_type<vf64<long double>, double>();
+ binary_op_return_type<vf64<long double>, vf64<long double>>();
+@@ -245,8 +241,6 @@ template <typename V>
+ binary_op_return_type<simd<long double, A>, ushort>();
+ binary_op_return_type<simd<long double, A>, int>();
+ binary_op_return_type<simd<long double, A>, uint>();
+- binary_op_return_type<simd<long double, A>, long>();
+- binary_op_return_type<simd<long double, A>, ulong>();
+ binary_op_return_type<simd<long double, A>, float>();
+ binary_op_return_type<simd<long double, A>, double>();
+
+@@ -258,6 +252,24 @@ template <typename V>
+ VERIFY((is_substitution_failure<vf64<ldouble>, ullong>));
+ VERIFY((is_substitution_failure<simd<ldouble, A>, llong>));
+ VERIFY((is_substitution_failure<simd<ldouble, A>, ullong>));
++ if constexpr (sizeof(long) == sizeof(llong))
++ {
++ VERIFY((is_substitution_failure<vldouble, long>));
++ VERIFY((is_substitution_failure<vldouble, ulong>));
++ VERIFY((is_substitution_failure<vf64<ldouble>, long>));
++ VERIFY((is_substitution_failure<vf64<ldouble>, ulong>));
++ VERIFY((is_substitution_failure<simd<ldouble, A>, long>));
++ VERIFY((is_substitution_failure<simd<ldouble, A>, ulong>));
++ }
++ else
++ {
++ binary_op_return_type<vldouble, long>();
++ binary_op_return_type<vldouble, ulong>();
++ binary_op_return_type<vf64<long double>, long>();
++ binary_op_return_type<vf64<long double>, ulong>();
++ binary_op_return_type<simd<long double, A>, long>();
++ binary_op_return_type<simd<long double, A>, ulong>();
++ }
+ }
+ else
+ {
+@@ -267,6 +279,12 @@ template <typename V>
+ binary_op_return_type<vf64<long double>, ullong>();
+ binary_op_return_type<simd<long double, A>, llong>();
+ binary_op_return_type<simd<long double, A>, ullong>();
++ binary_op_return_type<vldouble, long>();
++ binary_op_return_type<vldouble, ulong>();
++ binary_op_return_type<vf64<long double>, long>();
++ binary_op_return_type<vf64<long double>, ulong>();
++ binary_op_return_type<simd<long double, A>, long>();
++ binary_op_return_type<simd<long double, A>, ulong>();
+ }
+
+ VERIFY((is_substitution_failure<vf64<long double>, vldouble>));
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/reductions.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/reductions.cc
+@@ -114,6 +114,7 @@ template <typename V>
+ T acc = x[0];
+ for (size_t i = 1; i < V::size(); ++i)
+ acc += x[i];
+- ULP_COMPARE(reduce(x), acc, V::size() / 2).on_failure("x = ", x);
++ const T max_distance = std::is_integral_v<T> ? 0 : V::size() / 2;
++ ULP_COMPARE(reduce(x), acc, max_distance).on_failure("x = ", x);
+ });
+ }
+--- a/src/libstdc++-v3/testsuite/experimental/simd/tests/trunc_ceil_floor.cc
++++ b/src/libstdc++-v3/testsuite/experimental/simd/tests/trunc_ceil_floor.cc
+@@ -25,8 +25,10 @@ template <typename V>
+ test()
+ {
+ using T = typename V::value_type;
++#ifdef __STDC_IEC_559__
+ constexpr T inf = std::__infinity_v<T>;
+ constexpr T denorm_min = std::__denorm_min_v<T>;
++#endif
+ constexpr T norm_min = std::__norm_min_v<T>;
+ constexpr T max = std::__finite_max_v<T>;
+ constexpr T min = std::__finite_min_v<T>;